| file_name | prefix | suffix | middle | fim_type |
---|---|---|---|---|
| large_string (lengths 4-140) | large_string (lengths 0-12.1k) | large_string (lengths 0-12k) | large_string (lengths 0-7.51k) | large_string (4 classes) |
box.py | so no break item, penalty is
# zero. Not actually sure this is a real case, because \hfil is always
# inserted right?
if break_item is None:
return 0
# "Each potential breakpoint has an associated 'penalty,' which
# represents the 'aesthetic cost' of breaking at that place."
# "In cases (a), (b), (c), the penalty is zero".
if isinstance(break_item, (Glue, Kern, MathOff)):
return 0
# "In case (d) an explicit penalty has been specified"
elif isinstance(break_item, Penalty):
return break_item.size
# "In case (e) the penalty is the current value of \hyphenpenalty if
# the pre-break text is nonempty, or the current value of
# \exhyphenpenalty if the pre-break text is empty."
elif isinstance(break_item, DiscretionaryBreak):
raise NotImplementedError
else:
raise ValueError(f"Item is not a break-point: {break_item}")
def is_break_point(h_list, i):
"""These rules apply to both horizontal and vertical lists, but cases
(d) and (e) should never happen.
"""
item = h_list[i]
# a) at glue, provided that this glue is immediately preceded by a non-
# discardable item, and that it is not part of a math formula (i.e.,
# not between math-on and math-off).
# A break 'at glue' occurs at the left edge of the glue space.
# TODO: Add math conditions.
if (isinstance(item, Glue)
# Check a previous item exists, and it is not discardable.
and ((i - 1) >= 0) and (not h_list[i - 1].discardable)):
return True
# b) at a kern, provided that this kern is immediately followed by
# glue, and that it is not part of a math formula.
# TODO: Add math conditions.
elif (isinstance(item, Kern)
# Check a following item exists, and it is glue.
and ((i + 1) <= (len(h_list) - 1))
and isinstance(h_list[i + 1], Glue)):
return True
# c) at a math-off that is immediately followed by glue.
elif (isinstance(item, MathOff)
# Check a following item exists, and it is glue.
and ((i + 1) <= (len(h_list) - 1))
and isinstance(h_list[i + 1], Glue)):
return True
# d) at a penalty (which might have been inserted automatically in a
# formula).
elif isinstance(item, Penalty):
return True
# e) at a discretionary break.
elif isinstance(item, DiscretionaryBreak):
return True
else:
return False
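# A rough worked example of the cases above (hypothetical list; assumes, as in
# TeX, that characters/boxes are non-discardable while glue and kerns are
# discardable):
#   h_list = [Character, Glue, Kern, Glue, Penalty]
#   is_break_point(h_list, 1)  # True: glue preceded by a non-discardable item (case a)
#   is_break_point(h_list, 2)  # True: kern immediately followed by glue (case b)
#   is_break_point(h_list, 3)  # False: this glue is preceded by a discardable kern
#   is_break_point(h_list, 4)  # True: penalties are always potential break points (case d)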
class ListElement:
def __repr__(self):
return f'{self.__class__.__name__}({self.__dict__.__repr__()})'
# All modes.
# Boxes.
def contsrep(contents, n=9):
"""Get a nice representation of the contents of a box."""
cs_rep = []
for c in contents:
if isinstance(c, Character) and c.code in printable_ascii_codes:
c_str = chr(c.code)
if cs_rep and isinstance(cs_rep[-1], str):
cs_rep[-1] += c_str
else:
cs_rep.append(c_str)
else:
cs_rep.append(c)
return truncate_list(cs_rep, n=n)
class AbstractBox(ListElement):
discardable = False
def __init__(self, contents, to=None, spread=None, set_glue=True,
offset=0):
self.to = to
self.spread = spread
if to is not None and spread is not None:
raise Exception('Cannot specify both to and spread')
self.contents = list(contents)
self.set_glue = set_glue
if set_glue:
|
self.offset = offset
def __repr__(self):
a = []
a.append(f'naturally {dimrep(self.natural_length)}')
a.append(f'minimally {dimrep(self.min_length)}')
if self.to is not None:
a.append(f'to {dimrep(self.to)}')
elif self.spread is not None:
a.append(f'spread {dimrep(self.spread)}')
a.append(contsrep(self.contents))
cls_name = self.__class__.__name__
if self.set_glue:
cls_name = f'|{cls_name}|'
return drep(cls_name, a)
@property
def un_set_glues(self):
return [e for e in self.contents
if isinstance(e, Glue) and not e.is_set]
@property
def stretch(self):
return sum_infinities(g.stretch for g in self.un_set_glues)
@property
def shrink(self):
return sum_infinities(g.shrink for g in self.un_set_glues)
@property
def natural_length(self):
# The natural width, x, of the box contents is determined by adding up
# the widths of the boxes and kerns inside, together with the natural
# widths of all the glue inside.
# I'm assuming this also applies to VBoxes, but adding heights instead
# of widths. Might not be true, considering depths exist.
w = 0
for item in self.contents:
if isinstance(item, Glue):
w += item.natural_length
elif isinstance(item, Kern):
w += item.length
else:
w += self.get_length(item)
return w
@property
def min_length(self):
"""
Non-Knuthian concept, used to decide if a box is over-full: the length
even if all glue is maximally shrunk.
"""
w = 0
for item in self.contents:
if isinstance(item, Glue):
w += item.min_length
elif isinstance(item, Kern):
w += item.length
else:
w += self.get_length(item)
return w
@property
def is_over_full(self):
return self.min_length > self.desired_length
@property
def desired_length(self):
if self.to is not None:
return self.to
w = self.natural_length
if self.spread is not None:
w += self.spread
return w
def append(self, *args, **kwargs):
self.contents.append(*args, **kwargs)
def extend(self, *args, **kwargs):
self.contents.extend(*args, **kwargs)
def copy(self, *args, **kwargs):
# If glue is set, need to tell the constructor that set_glue should be
# True, but that the glue is already set.
if self.set_glue:
raise NotImplementedError('Can only copy un-set boxes at the '
'moment, because that is all that is '
'needed')
return self.__class__(contents=self.contents[:],
to=self.to, spread=self.spread, set_glue=False)
def glue_set_ratio(self):
return glue_set_ratio(self.natural_length, self.desired_length,
tuple(self.stretch), tuple(self.shrink))
def scale_and_set(self):
line_state, glue_ratio, glue_set_order = self.glue_set_ratio()
# I undo the disobeyance I did in the glue set ratio logic, to align
# with the TeXbook from now on.
if glue_ratio in (GlueRatio.no_shrinkability,
GlueRatio.no_stretchability):
glue_ratio = 0.0
# Note I've quoted this from the TeXbook, talking about setting glue in
# an H Box. But it later says that this all applies to V Boxes, so I've
# changed 'width' to 'length'.
# Every glob of glue in the list being boxed is modified. Suppose the
# glue has natural length u, stretchability y, and shrinkability z,
# where y is a jth order infinity and z is a kth order infinity.
for i, item in enumerate(self.contents):
if (not isinstance(item, Glue)) or item.is_set:
continue
g = item
if line_state == LineState.naturally_good:
glue_diff = 0
elif line_state == LineState.should_stretch:
glue_order, glue_factor = extract_dimen(g.stretch)
# [Each] glue takes the new length u + ry if j=i;
# it keeps its natural length u if j != i.
if glue_order == glue_set_order:
glue_diff = glue_ratio * glue_factor
else:
glue_diff = 0
elif line_state == LineState.should_shrink:
glue_order, glue_factor = extract_dimen(g.shrink)
# [Each] glue takes the new length u-rz if k = i; it
# keeps its natural length u if k != i.
if glue_order == glue_set_order:
glue_diff = -glue_ratio * glue_factor
else:
glue_diff = 0
else:
raise ValueError(f'Unknown line state: {line_state}')
# | self.scale_and_set() | conditional_block |
box.py | if glue_order == 0:
glue_ratio = min(glue_ratio, 1.0)
return line_state, glue_ratio, glue_order
def get_penalty(pre_break_conts, break_item):
# Will assume if breaking at end of paragraph, so no break item, penalty is
# zero. Not actually sure this is a real case, because \hfil is always
# inserted right?
if break_item is None:
return 0
# "Each potential breakpoint has an associated 'penalty,' which
# represents the 'aesthetic cost' of breaking at that place."
# "In cases (a), (b), (c), the penalty is zero".
if isinstance(break_item, (Glue, Kern, MathOff)):
return 0
# "In case (d) an explicit penalty has been specified"
elif isinstance(break_item, Penalty):
return break_item.size
# "In case (e) the penalty is the current value of \hyphenpenalty if
# the pre-break text is nonempty, or the current value of
# \exhyphenpenalty if the pre-break text is empty."
elif isinstance(break_item, DiscretionaryBreak):
raise NotImplementedError
else:
raise ValueError(f"Item is not a break-point: {break_item}")
def is_break_point(h_list, i):
"""These rules apply to both horizontal and vertical lists, but cases
(d) and (e) should never happen.
"""
item = h_list[i]
# a) at glue, provided that this glue is immediately preceded by a non-
# discardable item, and that it is not part of a math formula (i.e.,
# not between math-on and math-off).
# A break 'at glue' occurs at the left edge of the glue space.
# TODO: Add math conditions.
if (isinstance(item, Glue)
# Check a previous item exists, and it is not discardable.
and ((i - 1) >= 0) and (not h_list[i - 1].discardable)):
return True
# b) at a kern, provided that this kern is immediately followed by
# glue, and that it is not part of a math formula.
# TODO: Add math conditions.
elif (isinstance(item, Kern)
# Check a following item exists, and it is glue.
and ((i + 1) <= (len(h_list) - 1))
and isinstance(h_list[i + 1], Glue)):
return True
# c) at a math-off that is immediately followed by glue.
elif (isinstance(item, MathOff)
# Check a following item exists, and it is glue.
and ((i + 1) <= (len(h_list) - 1))
and isinstance(h_list[i + 1], Glue)):
return True
# d) at a penalty (which might have been inserted automatically in a
# formula).
elif isinstance(item, Penalty):
return True
# e) at a discretionary break.
elif isinstance(item, DiscretionaryBreak):
return True
else:
return False
class ListElement:
def __repr__(self):
return f'{self.__class__.__name__}({self.__dict__.__repr__()})'
# All modes.
# Boxes.
def contsrep(contents, n=9):
"""Get a nice representation of the contents of a box."""
cs_rep = []
for c in contents:
if isinstance(c, Character) and c.code in printable_ascii_codes:
c_str = chr(c.code)
if cs_rep and isinstance(cs_rep[-1], str):
cs_rep[-1] += c_str
else:
cs_rep.append(c_str)
else:
cs_rep.append(c)
return truncate_list(cs_rep, n=n)
class AbstractBox(ListElement):
discardable = False
def __init__(self, contents, to=None, spread=None, set_glue=True,
offset=0):
self.to = to
self.spread = spread
if to is not None and spread is not None:
raise Exception('Cannot specify both to and spread')
self.contents = list(contents)
self.set_glue = set_glue
if set_glue:
self.scale_and_set()
self.offset = offset
def __repr__(self):
a = []
a.append(f'naturally {dimrep(self.natural_length)}')
a.append(f'minimally {dimrep(self.min_length)}')
if self.to is not None:
a.append(f'to {dimrep(self.to)}')
elif self.spread is not None:
a.append(f'spread {dimrep(self.spread)}')
a.append(contsrep(self.contents))
cls_name = self.__class__.__name__
if self.set_glue:
cls_name = f'|{cls_name}|'
return drep(cls_name, a)
@property
def un_set_glues(self):
return [e for e in self.contents
if isinstance(e, Glue) and not e.is_set]
@property
def stretch(self):
return sum_infinities(g.stretch for g in self.un_set_glues)
@property
def shrink(self):
return sum_infinities(g.shrink for g in self.un_set_glues)
@property
def natural_length(self):
# The natural width, x, of the box contents is determined by adding up
# the widths of the boxes and kerns inside, together with the natural
# widths of all the glue inside.
# I'm assuming this also applies to VBoxes, but adding heights instead
# of widths. Might not be true, considering depths exist.
w = 0
for item in self.contents:
if isinstance(item, Glue):
w += item.natural_length
elif isinstance(item, Kern):
w += item.length
else:
w += self.get_length(item)
return w
@property
def min_length(self):
"""
Non-Knuthian concept, used to decide if a box is over-full: the length
even if all glue is maximally shrunk.
"""
w = 0
for item in self.contents:
if isinstance(item, Glue):
w += item.min_length
elif isinstance(item, Kern):
w += item.length
else:
w += self.get_length(item)
return w
@property
def is_over_full(self):
return self.min_length > self.desired_length
@property
def desired_length(self):
if self.to is not None:
return self.to
w = self.natural_length
if self.spread is not None:
w += self.spread
return w
def append(self, *args, **kwargs):
self.contents.append(*args, **kwargs)
def extend(self, *args, **kwargs):
self.contents.extend(*args, **kwargs)
def copy(self, *args, **kwargs):
# If glue is set, need to tell the constructor that set_glue should be
# True, but that the glue is already set.
if self.set_glue:
raise NotImplementedError('Can only copy un-set boxes at the '
'moment, because that is all that is '
'needed')
return self.__class__(contents=self.contents[:],
to=self.to, spread=self.spread, set_glue=False)
def glue_set_ratio(self):
return glue_set_ratio(self.natural_length, self.desired_length,
tuple(self.stretch), tuple(self.shrink))
def scale_and_set(self):
line_state, glue_ratio, glue_set_order = self.glue_set_ratio()
# I undo the disobeyance I did in the glue set ratio logic, to align
# with the TeXbook from now on.
if glue_ratio in (GlueRatio.no_shrinkability,
GlueRatio.no_stretchability):
glue_ratio = 0.0
# Note I've quoted this from the TeXbook, talking about setting glue in
# an H Box. But it later says that this all applies to V Boxes, so I've
# changed 'width' to 'length'.
# Every glob of glue in the list being boxed is modified. Suppose the
# glue has natural length u, stretchability y, and shrinkability z,
# where y is a jth order infinity and z is a kth order infinity.
for i, item in enumerate(self.contents):
if (not isinstance(item, Glue)) or item.is_set:
continue
g = item
if line_state == LineState.naturally_good:
glue_diff = 0
elif line_state == LineState.should_stretch:
glue_order, glue_factor = extract_dimen(g.stretch)
# [Each] glue takes the new length u + ry if j=i;
# it keeps its natural length u if j != i.
if glue_order == glue_set_order:
glue_diff = glue_ratio * glue_factor
else:
glue_diff = 0
elif line_state == LineState.should_shrink:
glue_order, glue_factor = extract_dimen(g.shrink)
# [Each] glue takes the new length u-rz if k = i; it
# keeps its natural length u | random_line_split |
||
box.py | (self, *args, **kwargs):
self.contents.extend(*args, **kwargs)
def copy(self, *args, **kwargs):
# If glue is set, need to tell the constructor that set_glue should be
# True, but that the glue is already set.
if self.set_glue:
raise NotImplementedError('Can only copy un-set boxes at the '
'moment, because that is all that is '
'needed')
return self.__class__(contents=self.contents[:],
to=self.to, spread=self.spread, set_glue=False)
def glue_set_ratio(self):
return glue_set_ratio(self.natural_length, self.desired_length,
tuple(self.stretch), tuple(self.shrink))
def scale_and_set(self):
line_state, glue_ratio, glue_set_order = self.glue_set_ratio()
# I undo the disobeyance I did in the glue set ratio logic, to align
# with the TeXbook from now on.
if glue_ratio in (GlueRatio.no_shrinkability,
GlueRatio.no_stretchability):
glue_ratio = 0.0
# Note I've quoted this from the TeXbook, talking about setting glue in
# an H Box. But it later says that this all applies to V Boxes, so I've
# changed 'width' to 'length'.
# Every glob of glue in the list being boxed is modified. Suppose the
# glue has natural length u, stretchability y, and shrinkability z,
# where y is a jth order infinity and z is a kth order infinity.
for i, item in enumerate(self.contents):
if (not isinstance(item, Glue)) or item.is_set:
continue
g = item
if line_state == LineState.naturally_good:
glue_diff = 0
elif line_state == LineState.should_stretch:
glue_order, glue_factor = extract_dimen(g.stretch)
# [Each] glue takes the new length u + ry if j=i;
# it keeps its natural length u if j != i.
if glue_order == glue_set_order:
glue_diff = glue_ratio * glue_factor
else:
glue_diff = 0
elif line_state == LineState.should_shrink:
glue_order, glue_factor = extract_dimen(g.shrink)
# [Each] glue takes the new length u-rz if k = i; it
# keeps its natural length u if k != i.
if glue_order == glue_set_order:
glue_diff = -glue_ratio * glue_factor
else:
glue_diff = 0
else:
raise ValueError(f'Unknown line state: {line_state}')
# Notice that stretching or shrinking occurs only when the glue
# has the highest order of infinity that doesn't cancel out.
self.contents[i].set(round(g.natural_length + glue_diff))
self.set_glue = True
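# Rough worked example of the rule quoted above (illustrative numbers only):
# suppose the box must be 10pt longer than its natural length and the total
# finite (order-0) stretchability is 20pt, so the glue set ratio r = 10/20 = 0.5.
# A glue item with natural length u = 6pt and stretch y = 4pt of the same order
# is then set to u + r*y = 6 + 0.5*4 = 8pt; glue whose stretch is of a lower
# order keeps its natural length.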
def badness(self):
"""
Compute how bad this box would look if placed on a line. This is
high if the line is much shorter or longer than the page width.
"""
# Page 97 of TeXbook.
# "The badness of a line is an integer that is approximately 100 times
# the cube of the ratio by which the glue inside the line must stretch
# or shrink to make an hbox of the required size. For example, if the
# line has a total shrinkability of 10 points, and if the glue is being
# compressed by a total of 9 points, the badness is computed to be 73
# (since 100 * (9/10)^3 = 72.9); similarly, a line that stretches by
# twice its total stretchability has a badness of 800. But if the
# badness obtained by this method turns out to be more than 10000, the
# value 10000 is used. (See the discussion of glue set ratio and glue
# set order in Chapter 12; if i != 0, there is infinite stretchability
# or shrinkability, so the badness is zero, otherwise the badness is
# approximately min(100r^3, 10000).) Overfull boxes are considered to
# be infinitely bad; they are avoided whenever possible."
# Page 111 of TeXbook.
# "Vertical badness is computed by the same rules as horizontal
# badness; it is an integer between 0 and 10000, inclusive, except when
# the box is overfull, when it is infinity."
if self.is_over_full:
return math.inf
line_state, glue_ratio, glue_order = self.glue_set_ratio()
if glue_order > 0:
return 0
# I can't find this stated anywhere, but it seems intuitively correct:
# a single word on a line has no flexibility, but it is probably bad.
elif glue_ratio in (GlueRatio.no_stretchability,
GlueRatio.no_shrinkability):
return 10000
else:
return min(round(100 * glue_ratio ** 3), 10000)
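# Worked examples of the TeXbook numbers quoted above:
#   min(round(100 * (9 / 10) ** 3), 10000)   # -> 73, shrinking by 9pt of 10pt shrinkability
#   min(round(100 * 2.0 ** 3), 10000)        # -> 800, stretching to twice the stretchability
#   min(round(100 * 5.0 ** 3), 10000)        # -> 10000, capped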
class HBox(AbstractBox):
def get_length(self, item):
if isinstance(item, (Glue, Kern)):
return item.length
else:
return item.width
@property
def widths(self):
return [self.get_length(e) for e in self.contents]
@property
def heights(self):
return [0 if isinstance(e, (Glue, Kern)) else e.height
for e in self.contents]
@property
def depths(self):
return [0 if isinstance(e, (Glue, Kern)) else e.depth
for e in self.contents]
@property
def width(self):
if not self.set_glue:
raise AttributeError('HBox is not set yet, does not have a width')
return self.desired_length
# TODO: I'm not sure the height and depth definitions are correct.
@property
def height(self):
return max(self.heights, default=0)
@property
def depth(self):
return max(self.depths, default=0)
def demerit(self, break_item, line_penalty):
ten_k = 10000
el = line_penalty
b = self.badness()
p = get_penalty(self.contents, break_item)
d = (el + b)**2
if 0 <= p < ten_k:
d += p**2
elif -ten_k < p < 0:
d -= p**2
elif p <= -ten_k:
pass
else:
raise LogicError('Undefined condition state when computing '
'demerit')
return d
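# Illustrative demerit calculation (made-up numbers): with line_penalty el = 10,
# badness b = 73 and an explicit penalty p = 50, the demerit is
# (10 + 73) ** 2 + 50 ** 2 = 6889 + 2500 = 9389. A negative penalty above -10000
# would instead subtract p**2, and p <= -10000 contributes nothing.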
def considerable_as_line(self, tolerance, break_item):
return (get_penalty(self.contents, break_item) < 10000
and (self.badness() <= tolerance))
class VBox(AbstractBox):
def get_length(self, item):
if isinstance(item, (Glue, Kern)):
return item.length
else:
return item.height
@property
def widths(self):
return [0 if isinstance(e, (Glue, Kern)) else e.width
for e in self.contents]
@property
def heights(self):
return [self.get_length(e) for e in self.contents]
@property
def depths(self):
return [0 if isinstance(e, (Glue, Kern)) else e.depth
for e in self.contents]
@property
def width(self):
return max(self.widths)
@property
def height(self):
if not self.set_glue:
raise AttributeError('VBox is not set yet, does not have a height')
return self.desired_length
# TODO: This is wrong. Correct rules are in TeXbook page 80.
@property
def depth(self):
if self.contents:
# This is an approximation of the rules, not an attempt at
# correctness.
if not isinstance(self.contents[-1], AbstractBox):
return 0
else:
return self.contents[-1].depth
else:
return 0
def page_break_cost_and_penalty(self, break_item, insert_penalties):
# Page 111 of TeXbook.
ten_k = 10000
b = self.badness()
p = get_penalty(self.contents, break_item)
q = insert_penalties
if b < math.inf and p <= -ten_k and q < ten_k:
c = p
elif b < ten_k and -ten_k < p < ten_k and q < ten_k:
c = b + p + q
elif b >= ten_k and -ten_k < p < ten_k and q < ten_k:
# Not ten_k, I checked!
hundred_k = 100000
c = hundred_k
elif (b == math.inf or q >= ten_k) and p < ten_k:
c = math.inf
else:
raise LogicError('TeX implies we should not get here')
return c, p
class | Rule | identifier_name |
|
twine.py | (self, **kwargs):
for name, strand in self._load_twine(**kwargs).items():
setattr(self, name, strand)
self._available_strands = set(trim_suffix(name, "_schema") for name in vars(self))
self._available_manifest_strands = self._available_strands & set(MANIFEST_STRANDS)
def _load_twine(self, source=None):
"""Load twine from a *.json filename, file-like or a json string and validates twine contents."""
if source is None:
# If loading an unspecified twine, return an empty one rather than raising error (like in _load_data())
raw_twine = {}
logger.warning("No twine source specified. Loading empty twine.")
else:
raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object"))
self._validate_against_schema("twine", raw_twine)
self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None))
return raw_twine
def _load_json(self, kind, source, **kwargs):
"""Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data."""
if source is None:
raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.")
# Decode the json string and deserialize to objects.
try:
data = load_json(source, **kwargs)
except FileNotFoundError as e:
raise exceptions.file_not_found_map[kind](e)
except jsonlib.decoder.JSONDecodeError as e:
raise exceptions.invalid_json_map[kind](e)
return data
def _get_schema(self, strand):
"""Get the schema for the given strand.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:return dict:
"""
if strand == "twine":
# The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain
# schema itself. The twine schema is distributed with this package to ensure version consistency...
schema_path = "schema/twine_schema.json"
elif strand in CHILDREN_STRANDS:
# The data is a list of children. The "children" strand of the twine describes matching criteria for
# the children, not the schema of the "children" data, which is distributed with this package to ensure
# version consistency...
schema_path = "schema/children_schema.json"
elif strand in MANIFEST_STRANDS:
# The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
# filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
# is distributed with this package to ensure version consistency...
schema_path = "schema/manifest_schema.json"
else:
if strand not in SCHEMA_STRANDS:
raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")
# Get schema from twine.json file.
schema_key = strand + "_schema"
try:
return getattr(self, schema_key)
except AttributeError:
raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")
return jsonlib.loads(pkg_resources.resource_string("twined", schema_path))
def _validate_against_schema(self, strand, data):
"""Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:param dict data:
:return None:
"""
schema = self._get_schema(strand)
try:
jsonschema_validate(instance=data, schema=schema)
logger.debug("Validated %s against schema", strand)
except ValidationError as e:
raise exceptions.invalid_contents_map[strand](str(e))
def _validate_twine_version(self, twine_file_twined_version):
"""Validate that the installed version is consistent with an optional version specification in the twine file."""
installed_twined_version = pkg_resources.get_distribution("twined").version
logger.debug(
"Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
)
if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
raise exceptions.TwineVersionConflict(
f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
)
def _validate_values(self, kind, source, cls=None, **kwargs):
"""Validate values against the twine schema."""
data = self._load_json(kind, source, **kwargs)
self._validate_against_schema(kind, data)
if cls:
return cls(**data)
return data
def _validate_manifest(self, kind, source, cls=None, **kwargs):
"""Validate manifest against the twine schema."""
data = self._load_json(kind, source, **kwargs)
# TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive
inbound = True
if hasattr(data, "to_primitive"):
inbound = False
data = data.to_primitive()
self._validate_against_schema(kind, data)
self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)
if cls and inbound:
return cls(**data)
return data
def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
"""Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present
in the given manifest.
:param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed)
:param dict manifest: the manifest whose datasets are to be validated
:raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing
:return None:
"""
# This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
manifest_schema = getattr(self, manifest_kind)
for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
if expected_dataset_name in manifest["datasets"]:
continue
if expected_dataset_schema.get("optional", False):
continue
raise exceptions.invalid_contents_map[manifest_kind](
f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
)
@property
def available_strands(self):
"""Get the names of strands that are found in this twine.
:return set:
"""
return self._available_strands
@property
def available_manifest_strands(self):
"""Get the names of the manifest strands that are found in this twine.
:return set:
"""
return self._available_manifest_strands
def validate_children(self, source, **kwargs):
"""Validate that the children values, passed as either a file or a json string, are correct."""
# TODO cache this loaded data keyed on a hashed version of kwargs
children = self._load_json("children", source, **kwargs)
self._validate_against_schema("children", children)
strand = getattr(self, "children", [])
# Loop the children and accumulate values so we have an O(1) check
children_keys = {}
for child in children:
children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1
# Check there is at least one child for each item described in the strand
# TODO add max, min num specs to the strand schema and check here
for item in strand:
strand_key = item["key"]
if children_keys.get(strand_key, 0) <= 0:
raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")
# Loop the strand and add unique keys to dict so we have an O(1) check
strand_keys = {}
for item in strand:
strand_keys[item["key"]] = True
# Check that each child has a key which is described in the strand
for child in children:
child_key = child["key"]
if not strand_keys.get(child_key, False):
raise exceptions.InvalidValuesContents(
f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
)
# TODO Additional validation that the children match what is set as required in the Twine
return children
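# Sketch of the matching rule above, with hypothetical keys:
#   strand   = [{"key": "wind_service"}]
#   children = [{"key": "wind_service", ...}]   # passes: every strand key has a child
#                                               # and every child key is in the strand
#   children = [{"key": "other_service", ...}]  # raises InvalidValuesContents because
#                                               # no child matches the key "wind_service"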
def validate_credentials(self, *args, dotenv_path=None, **kwargs):
"""Validate that all credentials required by the twine are present.
Credentials must be set as environment variables, or defined in a '.env | __init__ | identifier_name |
|
twine.py | (self, kind, source, **kwargs):
"""Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data."""
if source is None:
raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.")
# Decode the json string and deserialize to objects.
try:
data = load_json(source, **kwargs)
except FileNotFoundError as e:
raise exceptions.file_not_found_map[kind](e)
except jsonlib.decoder.JSONDecodeError as e:
raise exceptions.invalid_json_map[kind](e)
return data
def _get_schema(self, strand):
"""Get the schema for the given strand.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:return dict:
"""
if strand == "twine":
# The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain
# schema itself. The twine schema is distributed with this package to ensure version consistency...
schema_path = "schema/twine_schema.json"
elif strand in CHILDREN_STRANDS:
# The data is a list of children. The "children" strand of the twine describes matching criteria for
# the children, not the schema of the "children" data, which is distributed with this package to ensure | # version consistency...
schema_path = "schema/children_schema.json"
elif strand in MANIFEST_STRANDS:
# The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
# filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
# is distributed with this package to ensure version consistency...
schema_path = "schema/manifest_schema.json"
else:
if strand not in SCHEMA_STRANDS:
raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")
# Get schema from twine.json file.
schema_key = strand + "_schema"
try:
return getattr(self, schema_key)
except AttributeError:
raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")
return jsonlib.loads(pkg_resources.resource_string("twined", schema_path))
def _validate_against_schema(self, strand, data):
"""Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:param dict data:
:return None:
"""
schema = self._get_schema(strand)
try:
jsonschema_validate(instance=data, schema=schema)
logger.debug("Validated %s against schema", strand)
except ValidationError as e:
raise exceptions.invalid_contents_map[strand](str(e))
def _validate_twine_version(self, twine_file_twined_version):
"""Validate that the installed version is consistent with an optional version specification in the twine file."""
installed_twined_version = pkg_resources.get_distribution("twined").version
logger.debug(
"Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
)
if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
raise exceptions.TwineVersionConflict(
f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
)
def _validate_values(self, kind, source, cls=None, **kwargs):
"""Validate values against the twine schema."""
data = self._load_json(kind, source, **kwargs)
self._validate_against_schema(kind, data)
if cls:
return cls(**data)
return data
def _validate_manifest(self, kind, source, cls=None, **kwargs):
"""Validate manifest against the twine schema."""
data = self._load_json(kind, source, **kwargs)
# TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive
inbound = True
if hasattr(data, "to_primitive"):
inbound = False
data = data.to_primitive()
self._validate_against_schema(kind, data)
self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)
if cls and inbound:
return cls(**data)
return data
def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
"""Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present
in the given manifest.
:param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed)
:param dict manifest: the manifest whose datasets are to be validated
:raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing
:return None:
"""
# This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
manifest_schema = getattr(self, manifest_kind)
for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
if expected_dataset_name in manifest["datasets"]:
continue
if expected_dataset_schema.get("optional", False):
continue
raise exceptions.invalid_contents_map[manifest_kind](
f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
)
@property
def available_strands(self):
"""Get the names of strands that are found in this twine.
:return set:
"""
return self._available_strands
@property
def available_manifest_strands(self):
"""Get the names of the manifest strands that are found in this twine.
:return set:
"""
return self._available_manifest_strands
def validate_children(self, source, **kwargs):
"""Validate that the children values, passed as either a file or a json string, are correct."""
# TODO cache this loaded data keyed on a hashed version of kwargs
children = self._load_json("children", source, **kwargs)
self._validate_against_schema("children", children)
strand = getattr(self, "children", [])
# Loop the children and accumulate values so we have an O(1) check
children_keys = {}
for child in children:
children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1
# Check there is at least one child for each item described in the strand
# TODO add max, min num specs to the strand schema and check here
for item in strand:
strand_key = item["key"]
if children_keys.get(strand_key, 0) <= 0:
raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")
# Loop the strand and add unique keys to dict so we have an O(1) check
strand_keys = {}
for item in strand:
strand_keys[item["key"]] = True
# Check that each child has a key which is described in the strand
for child in children:
child_key = child["key"]
if not strand_keys.get(child_key, False):
raise exceptions.InvalidValuesContents(
f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
)
# TODO Additional validation that the children match what is set as required in the Twine
return children
def validate_credentials(self, *args, dotenv_path=None, **kwargs):
"""Validate that all credentials required by the twine are present.
Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets
manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials
strand.
If not present in the environment, validate_credentials will check for variables in a .env file (if present)
and populate the environment with them. Typically a .env file resides at the root of your application (the
working directory) although a specific path may be set using the `dotenv_path` argument.
.env files should never be committed to git or any other version control system.
A .env file can look like this:
```
# a comment that will be ignored.
YOUR_SECRET_VALUE=itsasecret
MEANING_OF_LIFE=42
MULTILINE_VAR="hello\nworld"
```
Or like this (also useful for bash users):
```
export YOUR_SECRET_VALUE=itsasecret
export MEANING_OF_LIFE=42
export MULTILINE_VAR="hello\nworld"
```
"""
if not hasattr(self, "credentials"):
return set()
# Load any variables from the .env file into the environment.
| random_line_split |
|
twine.py | data is a list of children. The "children" strand of the twine describes matching criteria for
# the children, not the schema of the "children" data, which is distributed with this package to ensure
# version consistency...
schema_path = "schema/children_schema.json"
elif strand in MANIFEST_STRANDS:
# The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
# filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
# is distributed with this package to ensure version consistency...
schema_path = "schema/manifest_schema.json"
else:
if strand not in SCHEMA_STRANDS:
raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")
# Get schema from twine.json file.
schema_key = strand + "_schema"
try:
return getattr(self, schema_key)
except AttributeError:
raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")
return jsonlib.loads(pkg_resources.resource_string("twined", schema_path))
def _validate_against_schema(self, strand, data):
"""Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:param dict data:
:return None:
"""
schema = self._get_schema(strand)
try:
jsonschema_validate(instance=data, schema=schema)
logger.debug("Validated %s against schema", strand)
except ValidationError as e:
raise exceptions.invalid_contents_map[strand](str(e))
def _validate_twine_version(self, twine_file_twined_version):
"""Validate that the installed version is consistent with an optional version specification in the twine file."""
installed_twined_version = pkg_resources.get_distribution("twined").version
logger.debug(
"Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
)
if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
raise exceptions.TwineVersionConflict(
f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
)
def _validate_values(self, kind, source, cls=None, **kwargs):
"""Validate values against the twine schema."""
data = self._load_json(kind, source, **kwargs)
self._validate_against_schema(kind, data)
if cls:
return cls(**data)
return data
def _validate_manifest(self, kind, source, cls=None, **kwargs):
"""Validate manifest against the twine schema."""
data = self._load_json(kind, source, **kwargs)
# TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive
inbound = True
if hasattr(data, "to_primitive"):
inbound = False
data = data.to_primitive()
self._validate_against_schema(kind, data)
self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)
if cls and inbound:
return cls(**data)
return data
def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
"""Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present
in the given manifest.
:param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed)
:param dict manifest: the manifest whose datasets are to be validated
:raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing
:return None:
"""
# This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
manifest_schema = getattr(self, manifest_kind)
for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
if expected_dataset_name in manifest["datasets"]:
continue
if expected_dataset_schema.get("optional", False):
continue
raise exceptions.invalid_contents_map[manifest_kind](
f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
)
@property
def available_strands(self):
"""Get the names of strands that are found in this twine.
:return set:
"""
return self._available_strands
@property
def available_manifest_strands(self):
"""Get the names of the manifest strands that are found in this twine.
:return set:
"""
return self._available_manifest_strands
def validate_children(self, source, **kwargs):
"""Validate that the children values, passed as either a file or a json string, are correct."""
# TODO cache this loaded data keyed on a hashed version of kwargs
children = self._load_json("children", source, **kwargs)
self._validate_against_schema("children", children)
strand = getattr(self, "children", [])
# Loop the children and accumulate values so we have an O(1) check
children_keys = {}
for child in children:
children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1
# Check there is at least one child for each item described in the strand
# TODO add max, min num specs to the strand schema and check here
for item in strand:
strand_key = item["key"]
if children_keys.get(strand_key, 0) <= 0:
raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")
# Loop the strand and add unique keys to dict so we have an O(1) check
strand_keys = {}
for item in strand:
strand_keys[item["key"]] = True
# Check that each child has a key which is described in the strand
for child in children:
child_key = child["key"]
if not strand_keys.get(child_key, False):
raise exceptions.InvalidValuesContents(
f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
)
# TODO Additional validation that the children match what is set as required in the Twine
return children
def validate_credentials(self, *args, dotenv_path=None, **kwargs):
"""Validate that all credentials required by the twine are present.
Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets
manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials
strand.
If not present in the environment, validate_credentials will check for variables in a .env file (if present)
and populate the environment with them. Typically a .env file resides at the root of your application (the
working directory) although a specific path may be set using the `dotenv_path` argument.
.env files should never be committed to git or any other version control system.
A .env file can look like this:
```
# a comment that will be ignored.
YOUR_SECRET_VALUE=itsasecret
MEANING_OF_LIFE=42
MULTILINE_VAR="hello\nworld"
```
Or like this (also useful for bash users):
```
export YOUR_SECRET_VALUE=itsasecret
export MEANING_OF_LIFE=42
export MULTILINE_VAR="hello\nworld"
```
"""
if not hasattr(self, "credentials"):
return set()
# Load any variables from the .env file into the environment.
dotenv_path = dotenv_path or os.path.join(".", ".env")
load_dotenv(dotenv_path)
for credential in self.credentials:
if credential["name"] not in os.environ:
raise exceptions.CredentialNotFound(
f"Credential {credential['name']!r} missing from environment or .env file."
)
return self.credentials
def validate_configuration_values(self, source, **kwargs):
"""Validate that the configuration values, passed as either a file or a json string, are correct."""
return self._validate_values("configuration_values", source, **kwargs)
def validate_input_values(self, source, **kwargs):
"""Validate that the input values, passed as either a file or a json string, are correct."""
return self._validate_values("input_values", source, **kwargs)
def validate_output_values(self, source, **kwargs):
"""Validate that the output values, passed as either a file or a json string, are correct."""
return self._validate_values("output_values", source, **kwargs)
def validate_monitor_message(self, source, **kwargs):
"""Validate monitor message against the monitor message schema strand."""
return self._validate_values(kind="monitor_message", source=source, **kwargs)
def validate_configuration_manifest(self, source, **kwargs):
| """Validate the input manifest, passed as either a file or a json string."""
return self._validate_manifest("configuration_manifest", source, **kwargs) | identifier_body |
|
twine.py | , kind, source, **kwargs):
"""Load data from either a *.json file, an open file pointer or a json string. Directly returns any other data."""
if source is None:
raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.")
# Decode the json string and deserialize to objects.
try:
data = load_json(source, **kwargs)
except FileNotFoundError as e:
raise exceptions.file_not_found_map[kind](e)
except jsonlib.decoder.JSONDecodeError as e:
raise exceptions.invalid_json_map[kind](e)
return data
def _get_schema(self, strand):
"""Get the schema for the given strand.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:return dict:
"""
if strand == "twine":
# The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain
# schema itself. The twine schema is distributed with this package to ensure version consistency...
schema_path = "schema/twine_schema.json"
elif strand in CHILDREN_STRANDS:
# The data is a list of children. The "children" strand of the twine describes matching criteria for
# the children, not the schema of the "children" data, which is distributed with this package to ensure
# version consistency...
schema_path = "schema/children_schema.json"
elif strand in MANIFEST_STRANDS:
# The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
# filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
# is distributed with this package to ensure version consistency...
schema_path = "schema/manifest_schema.json"
else:
if strand not in SCHEMA_STRANDS:
raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")
# Get schema from twine.json file.
schema_key = strand + "_schema"
try:
return getattr(self, schema_key)
except AttributeError:
raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")
return jsonlib.loads(pkg_resources.resource_string("twined", schema_path))
def _validate_against_schema(self, strand, data):
"""Validate data against a schema, raises exceptions of type Invalid<strand>Json if not compliant.
Can be used to validate:
- the twine file contents itself against the present version twine spec
- children data against the required schema for the present version twine spec
- values data for compliance with schema written in the twine (for strands like input_values_schema)
:param str strand:
:param dict data:
:return None:
"""
schema = self._get_schema(strand)
try:
jsonschema_validate(instance=data, schema=schema)
logger.debug("Validated %s against schema", strand)
except ValidationError as e:
raise exceptions.invalid_contents_map[strand](str(e))
def _validate_twine_version(self, twine_file_twined_version):
"""Validate that the installed version is consistent with an optional version specification in the twine file."""
installed_twined_version = pkg_resources.get_distribution("twined").version
logger.debug(
"Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
)
if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
raise exceptions.TwineVersionConflict(
f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
)
def _validate_values(self, kind, source, cls=None, **kwargs):
"""Validate values against the twine schema."""
data = self._load_json(kind, source, **kwargs)
self._validate_against_schema(kind, data)
if cls:
return cls(**data)
return data
def _validate_manifest(self, kind, source, cls=None, **kwargs):
"""Validate manifest against the twine schema."""
data = self._load_json(kind, source, **kwargs)
# TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive
inbound = True
if hasattr(data, "to_primitive"):
inbound = False
data = data.to_primitive()
self._validate_against_schema(kind, data)
self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)
if cls and inbound:
return cls(**data)
return data
def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
"""Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present
in the given manifest.
:param str manifest_kind: the kind of manifest that's being validated (so the correct schema can be accessed)
:param dict manifest: the manifest whose datasets are to be validated
:raise twined.exceptions.InvalidManifestContents: if one or more of the expected non-optional datasets is missing
:return None:
"""
# This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
manifest_schema = getattr(self, manifest_kind)
for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
if expected_dataset_name in manifest["datasets"]:
continue
if expected_dataset_schema.get("optional", False):
continue
raise exceptions.invalid_contents_map[manifest_kind](
f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
)
@property
def available_strands(self):
"""Get the names of strands that are found in this twine.
:return set:
"""
return self._available_strands
@property
def available_manifest_strands(self):
"""Get the names of the manifest strands that are found in this twine.
:return set:
"""
return self._available_manifest_strands
def validate_children(self, source, **kwargs):
"""Validate that the children values, passed as either a file or a json string, are correct."""
# TODO cache this loaded data keyed on a hashed version of kwargs
children = self._load_json("children", source, **kwargs)
self._validate_against_schema("children", children)
strand = getattr(self, "children", [])
# Loop the children and accumulate values so we have an O(1) check
children_keys = {}
for child in children:
children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1
# Check there is at least one child for each item described in the strand
# TODO add max, min num specs to the strand schema and check here
for item in strand:
strand_key = item["key"]
if children_keys.get(strand_key, 0) <= 0:
raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")
# Loop the strand and add unique keys to dict so we have an O(1) check
strand_keys = {}
for item in strand:
strand_keys[item["key"]] = True
# Check that each child has a key which is described in the strand
for child in children:
|
# TODO Additional validation that the children match what is set as required in the Twine
return children
def validate_credentials(self, *args, dotenv_path=None, **kwargs):
"""Validate that all credentials required by the twine are present.
Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets
manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials
strand.
If not present in the environment, validate_credentials will check for variables in a .env file (if present)
and populate the environment with them. Typically a .env file resides at the root of your application (the
working directory) although a specific path may be set using the `dotenv_path` argument.
.env files should never be committed to git or any other version control system.
A .env file can look like this:
```
# a comment that will be ignored.
YOUR_SECRET_VALUE=itsasecret
MEANING_OF_LIFE=42
MULTILINE_VAR="hello\nworld"
```
Or like this (also useful for bash users):
```
export YOUR_SECRET_VALUE=itsasecret
export MEANING_OF_LIFE=42
export MULTILINE_VAR="hello\nworld"
```
"""
if not hasattr(self, "credentials"):
return set()
# Load any variables from the .env file into the environment.
| child_key = child["key"]
if not strand_keys.get(child_key, False):
raise exceptions.InvalidValuesContents(
f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
) | conditional_block |
config.rs | pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> |
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
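// A sketch of the expected behaviour of the two parsers above (hypothetical
// test module, not part of the original file):
#[cfg(test)]
mod parse_sketch_tests {
    use super::*;

    #[test]
    fn font_and_line_range_parsing() {
        let fonts = parse_font_str("Hack;SimSun=31");
        // "Hack" falls back to the default size 26.0; "SimSun=31" is explicit.
        assert_eq!(fonts[0], ("Hack".to_owned(), 26.0));
        assert_eq!(fonts[1], ("SimSun".to_owned(), 31.0));
        // "1-3" expands to 1, 2, 3; "5" stays a single line number.
        assert_eq!(parse_line_range("1-3;5").unwrap(), vec![1, 2, 3, 5]);
    }
}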
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>;
type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))]
pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to highlight. eg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a .tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
}
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self | {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line| !line.starts_with('#') && !line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
} | identifier_body |
config.rs | parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>;
type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))]
pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a .tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
}
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self.line_pad)
.window_controls(!self.no_window_controls)
.window_title(self.window_title.clone())
.line_number(!self.no_line_number)
.font(self.font.clone().unwrap_or_default())
.round_corner(!self.no_round_corner)
.shadow_adder(self.get_shadow_adder()?)
.tab_width(self.tab_width)
.highlight_lines(self.highlight_lines.clone().unwrap_or_default())
.line_offset(self.line_offset);
Ok(formatter.build()?)
}
pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> {
Ok(ShadowAdder::new()
.background(match &self.background_image {
Some(path) => Background::Image(image::open(path)?.to_rgba8()),
None => Background::Solid(self.background),
})
.shadow_color(self.shadow_color)
.blur_radius(self.shadow_blur_radius)
.pad_horiz(self.pad_horiz)
.pad_vert(self.pad_vert)
.offset_x(self.shadow_offset_x)
.offset_y(self.shadow_offset_y))
}
pub fn | get_expanded_output | identifier_name |
|
config.rs | };
pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line| !line.starts_with('#') && !line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
}
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>; | pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a .tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
}
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self.line | type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))] | random_line_split |
config.rs | pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line| !line.starts_with('#') && !line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
}
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>;
type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))]
pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a .tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file |
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self | {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
} | conditional_block |
evaluator_test.go | ", true},
{" (1 > 2) == true", false},
{"true == true", true},
{"false != true", true},
{"false != false", false},
}
for _, bb := range tests{
evaluated := testEval(bb.input)
testBooleanObject(t, evaluated, bb.expected)
}
}
func testEval (input string) object.Object{
l := lexer.New(input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnvironment()
return Eval(program, env)
}
func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{
result, ok := obj.(*object.Boolean)
if !ok |
if result.Bool != expected{
t.Errorf("Expected %t but got %t", expected, result.Bool)
return false
}
return true
}
func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{
result, ok := obj.(*object.Integer)
if !ok {
t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj)
return false
}
if result.Value != expected {
t.Errorf("Expected Value %d was not equal to the evaluated value %d", expected, result.Value)
return false
}
return true
}
func TestBangOperator(t *testing.T){
tests := []struct{
input string
expected bool
}{
{"!true", false},
{ "!false", true},
{"!5", false},
{"!!true", true},
{"!!5", true},
}
for _, bang := range tests{
evaluated := testEval(bang.input)
testBooleanObject(t, evaluated,bang.expected)
}
}
func TestIfElseExpression(t *testing.T){
tests := []struct{
input string
expected interface{}
}{
{"if (true) { 10 }", 10 },
{"if (false) { 10 }", nil},
{"if ( 1 ) { 10 }", 10 },
{"if ( 1 < 2) { 10 }", 10 },
{"if ( 1 > 2) { 10 }", nil },
{"if ( 1 < 2) { 10 } else { 20 } ", 10 },
{"if ( 1 > 2) { 10 } else { 20 } ", 20 },
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
}else {
testNullObject(t, evaluated)
}
}
}
func testNullObject(t *testing.T, evaluated object.Object) bool {
if evaluated != NULL {
t.Errorf("Object expected to be null but got %T", evaluated)
return false
}
return true
}
func TestReturnStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "return 10;", 10},
{ "return 10; 9; ", 10},
{ "return 2 * 5; 9;", 10},
{"9; return 2*5; 9;", 10},
{`if (10>1) {
if ( 10>1) {
return 10;
}
return 1;
`, 10},
}
for _,tt := range tests{
evaluated := testEval(tt.input)
testIntegerObject(t,evaluated,tt.expected)
}
}
func TestErrorHandling (t *testing.T){
tests := []struct{
input string
expectedMessage string
}{
{"5 + true;", "type mismatch: INTEGER + BOOLEAN"},
{"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"},
{"-true", "unknown operator: -BOOLEAN"},
{"true + false;", "unknown operator: BOOLEAN + BOOLEAN"},
{`if (10 > 1) {
if ( 10 > 1) {
return true + true;
}
`, "unknown operator: BOOLEAN + BOOLEAN"},
{"foobar", "identifier not found: foobar"},
{`"Hello"-"World"`, "unknown operator: STRING - STRING"},
{`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"},
}
for _, tt :=range tests{
evaluated := testEval(tt.input)
errObj, ok := evaluated.(*object.Error)
if ! ok{
t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated)
continue
}
if errObj.Message != tt.expectedMessage{
t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message)
}
}
}
func TestLetStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{"let a = 5; a;", 5},
{"let a = 5 * 5; a; ", 25},
{"let a = 5; let b = a; b;", 5},
{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestFunctionObject(t *testing.T){
input := "fn(x) {x+2;};"
evaluated := testEval(input)
fn, ok := evaluated.(*object.Function)
if !ok{
t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated)
}
if len(fn.Parameters) != 1 {
t.Fatalf("function has wrong number of parameters %+v", fn.Parameters)
}
if fn.Parameters[0].String() != "x"{
t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0])
}
expectBody := "(x + 2)"
if fn.Body.String() != expectBody{
t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String())
}
}
func TestFunctionApplication(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "let identity = fn(x) {x;} identity(5);", 5},
{"let identity = fn(x) { return x;}; identity(5)", 5},
{"let double = fn(x) { x*2;}; double(5); ",10},
{"let add = fn(x, y) { x + y; }; add(4, 6);", 10},
{"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20},
{ "fn(x) {x; }(5)", 5},
{`fn( ) { 5;}()`, 5},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestClosures(t *testing.T){
input := `
let newAdder = fn(x) {
fn(y) { x+y };
};
let addTwo = newAdder(2);
addTwo(2);`
testIntegerObject(t, testEval(input), 4)
}
func TestStringLiteralExpression(t *testing.T){
input := `fn() {"hello world!"}();`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok{
t.Fatalf("expected object.String got :%T", evaluated)
}
if str.Value != "hello world!" {
t.Fatalf("expected value %s but got %s","hello world!" ,str.Value)
}
}
func TestStringConcatenation(t *testing.T){
input := `"Hello"+" "+"World!"`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok {
t.Fatalf("Object is not String. got= %T", evaluated)
}
if str.Value != "Hello World!" {
t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value)
}
}
func TestBuiltInFunction(t *testing.T){
tests := [] struct{
input string
expected interface{}
}{
{`len("")`, 0},
{`len("four")`, 4},
{"len(1)", "argument to `len` not supported, got INTEGER"},
{`len("one", "two")`, "wrong number of arguments. got 2, want=1"},
{`first([1,2,3,4])`, 1},
{`first([])`, NULL},
{`last([1,2,3,4])`, 4},
{`last([])`, NULL},
| {
t.Errorf("expected value was object.Boolean but got %T",obj)
} | conditional_block |
evaluator_test.go | fn(x) {x}];`, "unusable as hash key: FUNCTION"},
}
for _, tt :=range tests{
evaluated := testEval(tt.input)
errObj, ok := evaluated.(*object.Error)
if ! ok{
t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated)
continue
}
if errObj.Message != tt.expectedMessage{
t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message)
}
}
}
func TestLetStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{"let a = 5; a;", 5},
{"let a = 5 * 5; a; ", 25},
{"let a = 5; let b = a; b;", 5},
{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestFunctionObject(t *testing.T){
input := "fn(x) {x+2;};"
evaluated := testEval(input)
fn, ok := evaluated.(*object.Function)
if !ok{
t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated)
}
if len(fn.Parameters) != 1 {
t.Fatalf("function has wrong number of parameters %+v", fn.Parameters)
}
if fn.Parameters[0].String() != "x"{
t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0])
}
expectBody := "(x + 2)"
if fn.Body.String() != expectBody{
t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String())
}
}
func TestFunctionApplication(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "let identity = fn(x) {x;} identity(5);", 5},
{"let identity = fn(x) { return x;}; identity(5)", 5},
{"let double = fn(x) { x*2;}; double(5); ",10},
{"let add = fn(x, y) { x + y; }; add(4, 6);", 10},
{"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20},
{ "fn(x) {x; }(5)", 5},
{`fn( ) { 5;}()`, 5},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestClosures(t *testing.T){
input := `
let newAdder = fn(x) {
fn(y) { x+y };
};
let addTwo = newAdder(2);
addTwo(2);`
testIntegerObject(t, testEval(input), 4)
}
func TestStringLiteralExpression(t *testing.T){
input := `fn() {"hello world!"}();`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok{
t.Fatalf("expected object.String got :%T", evaluated)
}
if str.Value != "hello world!" {
t.Fatalf("expected value %s but got %s","hello world!" ,str.Value)
}
}
func TestStringConcatenation(t *testing.T){
input := `"Hello"+" "+"World!"`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok {
t.Fatalf("Object is not String. got= %T", evaluated)
}
if str.Value != "Hello World!" {
t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value)
}
}
func TestBuiltInFunction(t *testing.T){
tests := [] struct{
input string
expected interface{}
}{
{`len("")`, 0},
{`len("four")`, 4},
{"len(1)", "argument to `len` not supported, got INTEGER"},
{`len("one", "two")`, "wrong number of arguments. got 2, want=1"},
{`first([1,2,3,4])`, 1},
{`first([])`, NULL},
{`last([1,2,3,4])`, 4},
{`last([])`, NULL},
}
for _, tt := range tests{
evaluated := testEval(tt.input)
switch expected := tt.expected.(type){
case int:
testIntegerObject(t,evaluated,int64(expected))
case string:
errObj, ok := evaluated.(*object.Error)
if !ok{
t.Errorf("object is not error got.%T(+v)", evaluated, evaluated)
continue
}
if errObj.Message != expected{
t.Errorf("wrong error message. expected %q got %q", expected, errObj.Message)
}
}
}
}
func TestArrayLiterals(t *testing.T){
input := "[1,2 * 2,3 + 3]"
evaluated := testEval(input)
result, ok := evaluated.(*object.Array)
if !ok {
t.Fatalf("Expected to get an *object.Array got %T", evaluated)
}
if len(result.Elements) != 3{
t.Fatalf("Array Should have 3 elements but got %d", len(result.Elements))
}
testIntegerObject(t, result.Elements[0], 1)
testIntegerObject(t, result.Elements[1],4)
testIntegerObject(t, result.Elements[2], 6)
}
func TestArrayIndexExpressions(t *testing.T){
tests := []struct{
input string
expected interface{}
}{
{"[1, 2, 3, 4][0]", 1},
{"[1,2,3][1]", 2},
{"[1,2,3][2]", 3},
{"let i = 0; [1][i];", 1},
{"[1,2,3][1+1];", 3},
{"let myArray = [1,2,3,4]; myArray[2]; ", 3},
{"let myArray = [1,2,3,4]; myArray[0]+ myArray[1]+ myArray[2]; ", 6},
{"[1,2,3][3];", nil},
{"[1,2,3][-1];", nil},
}
for _, tt := range tests{
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
}else{
testNullObject(t, evaluated)
}
}
}
func TestHashLiterals(t *testing.T){
input := `let two = "two";
{
"one" : 10 - 9,
"two" : 1 + 1,
"three": 6/2,
4: 4,
true: 5,
false: 6
}
`
evaluated := testEval(input)
result, ok := evaluated.(*object.Hash)
if !ok {
t.Fatalf("Eval did not return Hash got %T", evaluated)
}
expected := map[object.HashKey]int64{
(&object.String{Value: "one"}).HashKey(): 1,
(&object.String{Value: "two"}).HashKey(): 2,
(&object.String{Value: "three"}).HashKey(): 3,
(&object.Integer{Value: 4}).HashKey(): 4,
TRUE.HashKey(): 5,
FALSE.HashKey(): 6,
}
if len(result.Pairs) != len(expected){
t.Fatalf("Hash has wrong number of pairs got %d", len(result.Pairs))
}
for expectedKey, expectedValue := range expected{
pair, ok := result.Pairs[expectedKey]
if !ok {
t.Errorf("no pair for given key in Pairs")
}
testIntegerObject(t,pair.Value, expectedValue)
}
}
func TestHashIndexExpression(t *testing.T){
tests := []struct{
input string
expected interface{}
}{
{`{"foo":5}["foo"]`, 5},
{`{"foo": 5}["bar"]`, nil},
{`let key ="foo"; {"foo": 5}[key];`, 5},
{`{}["foo"]`, nil},
{`{true: 5}[true]`, 5},
//{`{ 5 : 5}[5]'`,5},
}
for _, tt := range tests{
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
}else{
testNullObject(t, evaluated)
}
} | random_line_split |
||
evaluator_test.go | ", true},
{" (1 > 2) == true", false},
{"true == true", true},
{"false != true", true},
{"false != false", false},
}
for _, bb := range tests{
evaluated := testEval(bb.input)
testBooleanObject(t, evaluated, bb.expected)
}
}
func testEval (input string) object.Object{
l := lexer.New(input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnvironment()
return Eval(program, env)
}
func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{
result, ok := obj.(*object.Boolean)
if !ok{
t.Errorf("expected value was object.Boolean but got %T",obj)
}
if result.Bool != expected{
t.Errorf("Expected %t but got %t", expected, result.Bool)
return false
}
return true
}
func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{
result, ok := obj.(*object.Integer)
if !ok {
t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj)
return false
}
if result.Value != expected {
t.Errorf("Expected Value %d was not equal to the evaluated value %d", expected, result.Value)
return false
}
return true
}
func TestBangOperator(t *testing.T){
tests := []struct{
input string
expected bool
}{
{"!true", false},
{ "!false", true},
{"!5", false},
{"!!true", true},
{"!!5", true},
}
for _, bang := range tests{
evaluated := testEval(bang.input)
testBooleanObject(t, evaluated,bang.expected)
}
}
func TestIfElseExpression(t *testing.T){
tests := []struct{
input string
expected interface{}
}{
{"if (true) { 10 }", 10 },
{"if (false) { 10 }", nil},
{"if ( 1 ) { 10 }", 10 },
{"if ( 1 < 2) { 10 }", 10 },
{"if ( 1 > 2) { 10 }", nil },
{"if ( 1 < 2) { 10 } else { 20 } ", 10 },
{"if ( 1 > 2) { 10 } else { 20 } ", 20 },
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
}else {
testNullObject(t, evaluated)
}
}
}
func testNullObject(t *testing.T, evaluated object.Object) bool {
if evaluated != NULL {
t.Errorf("Object expected to be null but got %T", evaluated)
return false
}
return true
}
func TestReturnStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "return 10;", 10},
{ "return 10; 9; ", 10},
{ "return 2 * 5; 9;", 10},
{"9; return 2*5; 9;", 10},
{`if (10>1) {
if ( 10>1) {
return 10;
}
return 1;
`, 10},
}
for _,tt := range tests{
evaluated := testEval(tt.input)
testIntegerObject(t,evaluated,tt.expected)
}
}
func TestErrorHandling (t *testing.T){
tests := []struct{
input string
expectedMessage string
}{
{"5 + true;", "type mismatch: INTEGER + BOOLEAN"},
{"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"},
{"-true", "unknown operator: -BOOLEAN"},
{"true + false;", "unknown operator: BOOLEAN + BOOLEAN"},
{`if (10 > 1) {
if ( 10 > 1) {
return true + true;
}
`, "unknown operator: BOOLEAN + BOOLEAN"},
{"foobar", "identifier not found: foobar"},
{`"Hello"-"World"`, "unknown operator: STRING - STRING"},
{`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"},
}
for _, tt :=range tests{
evaluated := testEval(tt.input)
errObj, ok := evaluated.(*object.Error)
if ! ok{
t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated)
continue
}
if errObj.Message != tt.expectedMessage{
t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message)
}
}
}
func TestLetStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{"let a = 5; a;", 5},
{"let a = 5 * 5; a; ", 25},
{"let a = 5; let b = a; b;", 5},
{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestFunctionObject(t *testing.T) | }
func TestFunctionApplication(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "let identity = fn(x) {x;} identity(5);", 5},
{"let identity = fn(x) { return x;}; identity(5)", 5},
{"let double = fn(x) { x*2;}; double(5); ",10},
{"let add = fn(x, y) { x + y; }; add(4, 6);", 10},
{"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20},
{ "fn(x) {x; }(5)", 5},
{`fn( ) { 5;}()`, 5},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestClosures(t *testing.T){
input := `
let newAdder = fn(x) {
fn(y) { x+y };
};
let addTwo = newAdder(2);
addTwo(2);`
testIntegerObject(t, testEval(input), 4)
}
func TestStringLiteralExpression(t *testing.T){
input := `fn() {"hello world!"}();`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok{
t.Fatalf("expected object.String got :%T", evaluated)
}
if str.Value != "hello world!" {
t.Fatalf("expected value %s but got %s","hello world!" ,str.Value)
}
}
func TestStringConcatenation(t *testing.T){
input := `"Hello"+" "+"World!"`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok {
t.Fatalf("Object is not String. got= %T", evaluated)
}
if str.Value != "Hello World!" {
t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value)
}
}
func TestBuiltInFunction(t *testing.T){
tests := [] struct{
input string
expected interface{}
}{
{`len("")`, 0},
{`len("four")`, 4},
{"len(1)", "argument to `len` not supported, got INTEGER"},
{`len("one", "two")`, "wrong number of arguments. got 2, want=1"},
{`first([1,2,3,4])`, 1},
{`first([])`, NULL},
{`last([1,2,3,4])`, 4},
{`last([])`, NULL},
| {
input := "fn(x) {x+2;};"
evaluated := testEval(input)
fn, ok := evaluated.(*object.Function)
if !ok{
t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated)
}
if len(fn.Parameters) != 1 {
t.Fatalf("function has wrong number of parameters %+v", fn.Parameters)
}
if fn.Parameters[0].String() != "x"{
t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0])
}
expectBody := "(x + 2)"
if fn.Body.String() != expectBody{
t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String())
} | identifier_body |
evaluator_test.go | ", true},
{" (1 > 2) == true", false},
{"true == true", true},
{"false != true", true},
{"false != false", false},
}
for _, bb := range tests{
evaluated := testEval(bb.input)
testBooleanObject(t, evaluated, bb.expected)
}
}
func testEval (input string) object.Object{
l := lexer.New(input)
p := parser.New(l)
program := p.ParseProgram()
env := object.NewEnvironment()
return Eval(program, env)
}
func testBooleanObject(t *testing.T, obj object.Object, expected bool) bool{
result, ok := obj.(*object.Boolean)
if !ok{
t.Errorf("expected value was object.Boolean but got %T",obj)
}
if result.Bool != expected{
t.Errorf("Expected %t but got %t", expected, result.Bool)
return false
}
return true
}
func testIntegerObject(t *testing.T, obj object.Object, expected int64) bool{
result, ok := obj.(*object.Integer)
if !ok {
t.Errorf("Evaluated value is suppose to be of object.Integer Type by found %T",obj)
return false
}
if result.Value != expected {
t.Errorf("Expected Value %d was not equal to the evaluated value %d", expected, result.Value)
return false
}
return true
}
func TestBangOperator(t *testing.T){
tests := []struct{
input string
expected bool
}{
{"!true", false},
{ "!false", true},
{"!5", false},
{"!!true", true},
{"!!5", true},
}
for _, bang := range tests{
evaluated := testEval(bang.input)
testBooleanObject(t, evaluated,bang.expected)
}
}
func TestIfElseExpression(t *testing.T){
tests := []struct{
input string
expected interface{}
}{
{"if (true) { 10 }", 10 },
{"if (false) { 10 }", nil},
{"if ( 1 ) { 10 }", 10 },
{"if ( 1 < 2) { 10 }", 10 },
{"if ( 1 > 2) { 10 }", nil },
{"if ( 1 < 2) { 10 } else { 20 } ", 10 },
{"if ( 1 > 2) { 10 } else { 20 } ", 20 },
}
for _, tt := range tests {
evaluated := testEval(tt.input)
integer, ok := tt.expected.(int)
if ok {
testIntegerObject(t, evaluated, int64(integer))
}else {
testNullObject(t, evaluated)
}
}
}
func testNullObject(t *testing.T, evaluated object.Object) bool {
if evaluated != NULL {
t.Errorf("Object expected to be null but got %T", evaluated)
return false
}
return true
}
func TestReturnStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "return 10;", 10},
{ "return 10; 9; ", 10},
{ "return 2 * 5; 9;", 10},
{"9; return 2*5; 9;", 10},
{`if (10>1) {
if ( 10>1) {
return 10;
}
return 1;
`, 10},
}
for _,tt := range tests{
evaluated := testEval(tt.input)
testIntegerObject(t,evaluated,tt.expected)
}
}
func TestErrorHandling (t *testing.T){
tests := []struct{
input string
expectedMessage string
}{
{"5 + true;", "type mismatch: INTEGER + BOOLEAN"},
{"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"},
{"-true", "unknown operator: -BOOLEAN"},
{"true + false;", "unknown operator: BOOLEAN + BOOLEAN"},
{`if (10 > 1) {
if ( 10 > 1) {
return true + true;
}
`, "unknown operator: BOOLEAN + BOOLEAN"},
{"foobar", "identifier not found: foobar"},
{`"Hello"-"World"`, "unknown operator: STRING - STRING"},
{`{"name": "Monkey"}[fn(x) {x}];`, "unusable as hash key: FUNCTION"},
}
for _, tt :=range tests{
evaluated := testEval(tt.input)
errObj, ok := evaluated.(*object.Error)
if ! ok{
t.Errorf("no error object returned. got= %T(%+v)", evaluated, evaluated)
continue
}
if errObj.Message != tt.expectedMessage{
t.Errorf("wrong error message.expected=%q, got=%q", tt.expectedMessage, errObj.Message)
}
}
}
func TestLetStatements(t *testing.T){
tests := []struct{
input string
expected int64
}{
{"let a = 5; a;", 5},
{"let a = 5 * 5; a; ", 25},
{"let a = 5; let b = a; b;", 5},
{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestFunctionObject(t *testing.T){
input := "fn(x) {x+2;};"
evaluated := testEval(input)
fn, ok := evaluated.(*object.Function)
if !ok{
t.Fatalf("object is not a function. got: %T(%+v)", evaluated, evaluated)
}
if len(fn.Parameters) != 1 {
t.Fatalf("function has wrong number of parameters %+v", fn.Parameters)
}
if fn.Parameters[0].String() != "x"{
t.Fatalf("parameter is not 'x' got %q", fn.Parameters[0])
}
expectBody := "(x + 2)"
if fn.Body.String() != expectBody{
t.Fatalf("body of the function is not %q, got %q", expectBody, fn.Body.String())
}
}
func TestFunctionApplication(t *testing.T){
tests := []struct{
input string
expected int64
}{
{ "let identity = fn(x) {x;} identity(5);", 5},
{"let identity = fn(x) { return x;}; identity(5)", 5},
{"let double = fn(x) { x*2;}; double(5); ",10},
{"let add = fn(x, y) { x + y; }; add(4, 6);", 10},
{"let add = fn(x, y) { x + y; }; add(4 + 6, add(5,5));", 20},
{ "fn(x) {x; }(5)", 5},
{`fn( ) { 5;}()`, 5},
}
for _, tt := range tests{
testIntegerObject(t, testEval(tt.input), tt.expected)
}
}
func TestClosures(t *testing.T){
input := `
let newAdder = fn(x) {
fn(y) { x+y };
};
let addTwo = newAdder(2);
addTwo(2);`
testIntegerObject(t, testEval(input), 4)
}
func TestStringLiteralExpression(t *testing.T){
input := `fn() {"hello world!"}();`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok{
t.Fatalf("expected object.String got :%T", evaluated)
}
if str.Value != "hello world!" {
t.Fatalf("expected value %s but got %s","hello world!" ,str.Value)
}
}
func | (t *testing.T){
input := `"Hello"+" "+"World!"`
evaluated := testEval(input)
str, ok := evaluated.(*object.String)
if !ok {
t.Fatalf("Object is not String. got= %T", evaluated)
}
if str.Value != "Hello World!" {
t.Fatalf("The expected value of concatenated string %s but got %s", "Hello World!",str.Value)
}
}
func TestBuiltInFunction(t *testing.T){
tests := [] struct{
input string
expected interface{}
}{
{`len("")`, 0},
{`len("four")`, 4},
{"len(1)", "argument to `len` not supported, got INTEGER"},
{`len("one", "two")`, "wrong number of arguments. got 2, want=1"},
{`first([1,2,3,4])`, 1},
{`first([])`, NULL},
{`last([1,2,3,4])`, 4},
{`last([])`, NULL},
| TestStringConcatenation | identifier_name |
main.go | // DefaultAddress is the default address to serve from.
DefaultAddress string = ":8877"
// PrimoDomain is the domain at which Primo instances are hosted.
PrimoDomain string = "primo.exlibrisgroup.com"
// subDomain is the institution's subdomain of PrimoDomain, i.e. <subDomain>.primo.exlibrisgroup.com.
subDomain string = "ocul-qu"
// instVID is the institution's Primo view ID, used as the vid parameter on redirects.
instVID string = "01OCUL_QU:QU_DEFAULT"
// MaxMappingFileLength is the maximum number of lines in a mapping file.
MaxMappingFileLength uint64 = 1000000
// RecordPrefix is the prefix of the path of requests to catalogues for the permalink of a record.
RecordPrefix string = "/vwebv/holdingsInfo"
// PatronInfoPrefix2 is the prefix of the path of requests to catalogues for the patron login form.
PatronInfoPrefix2 string = "/vwebv/login"
// PatronInfoPrefix is the prefix of the path of requests to catalogues for patron account ("My Account") pages.
PatronInfoPrefix string = "/vwebv/my"
// SearchPrefix is the prefix of the path of requests to catalogues for search results.
SearchPrefix string = "/vwebv/search"
)
// A version flag, which should be overwritten when building using ldflags.
var version = "devel"
// Detourer is a struct which stores the data needed to perform redirects.
type Detourer struct {
idMap map[uint32]uint64 // The map of BibIDs to ExL IDs.
primo string // The domain name (host) for the target Primo instance.
vid string // The vid parameter to use when building Primo URLs.
}
// The Detourer serves HTTP redirects based on the request.
func (d Detourer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// In the default case, redirect to the Primo search form.
redirectTo := &url.URL{
Scheme: "https",
Host: d.primo,
Path: "/discovery/search",
}
// Depending on the prefix...
switch {
case strings.HasPrefix(r.URL.Path, RecordPrefix):
buildRecordRedirect(redirectTo, r, d.idMap)
case strings.HasPrefix(r.URL.Path, PatronInfoPrefix):
redirectTo.Path = "/discovery/login"
case strings.HasPrefix(r.URL.Path, PatronInfoPrefix2):
redirectTo.Path = "/discovery/login"
case strings.HasPrefix(r.URL.Path, SearchPrefix):
buildSearchRedirect(redirectTo, r)
}
// Set the vid parameter on all redirects.
setParamInURL(redirectTo, "vid", d.vid)
// Send the redirect to the client.
// http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently)
http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect)
}
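// For illustration, using only the prefixes defined above: a request whose path starts with
// /vwebv/login or /vwebv/my is sent to https://<primo host>/discovery/login?vid=<vid>, and any
// path that matches none of the cases falls back to the default /discovery/search form.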
// buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID.
func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) {
q := r.URL.Query()
// bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32)
bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32)
if err == nil {
bibID := uint32(bibID64)
exlID, present := idMap[bibID]
if present {
redirectTo.Path = "/discovery/fulldisplay"
setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID))
} else {
log.Printf("Not found: %v", bibID64)
}
} else {
// Log malformed bibId values instead of exiting; the request falls through to the default search redirect.
log.Printf("Could not parse bibId %q: %v", q.Get("bibId"), err)
}
}
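// A worked example (both IDs invented for illustration): a legacy permalink such as
// /vwebv/holdingsInfo?bibId=123456 is redirected, when 123456 appears in the mapping, to roughly
// https://<primo host>/discovery/fulldisplay?docid=alma991234567890&vid=<vid>.
// A bibId missing from the mapping is only logged, so the request keeps the default
// /discovery/search redirect set up in ServeHTTP.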
// SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME"
// SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL"
// SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T"
// SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL"
// buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search.
func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {
q := r.URL.Query()
setParamInURL(redirectTo, "tab", "Everything")
setParamInURL(redirectTo, "search_scope", "MyInst_and_CI")
if q.Get("searchArg") != "" {
switch q.Get("searchCode") {
case "TKEY^":
setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "TALL":
setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "NAME":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "author")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "CALL":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "callnumber.0")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "JALL":
redirectTo.Path = "/discovery/jsearch"
setParamInURL(redirectTo, "tab", "jsearch_slot")
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
default:
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
}
} else if q.Get("SEARCH") != "" {
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH")))
}
}
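// Rough sketches of the resulting URLs (search terms invented for illustration):
//   /vwebv/search?searchArg=twain&searchCode=NAME  -> /discovery/browse?browseScope=author&browseQuery=twain
//   /vwebv/search?searchArg=QA76&searchCode=CALL   -> /discovery/browse?browseScope=callnumber.0&browseQuery=QA76
//   /vwebv/search?searchArg=nature&searchCode=JALL -> /discovery/jsearch?tab=jsearch_slot&query=any,contains,nature
//   any other searchCode, or a bare SEARCH=...     -> /discovery/search?query=any,contains,...
// Every variant also carries the tab and search_scope parameters set above, plus the vid added in ServeHTTP.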
func main() {
// Define the command line flags.
addr := flag.String("address", DefaultAddress, "Address to bind on.")
subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.")
vid := flag.String("vid", instVID, "VID parameter for Primo.")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n")
fmt.Fprintf(os.Stderr, "Version %v\n", version)
fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] [file...]\n")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:")
flag.VisitAll(func(f *flag.Flag) {
uppercaseName := strings.ToUpper(f.Name)
fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName)
})
}
// Process the flags.
flag.Parse()
// If any flags have not been set, see if there are
// environment variables that set them.
err := overrideUnsetFlagsFromEnvironmentVariables()
if err != nil {
log.Fatalln(err)
}
// The Detourer has all the data needed to build redirects.
d := Detourer{
primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain),
vid: *vid,
}
// Map of BibIDs to ExL IDs
// The initial size is an estimate based on the number of arguments.
size := uint64(len(flag.Args())) * MaxMappingFileLength
d.idMap = make(map[uint32]uint64, size)
// Process each file in the arguments list.
for _, mappingFilePath := range flag.Args() {
// Add the mappings from this file to the idMap.
err := processFile(d.idMap, mappingFilePath)
if err != nil {
log.Fatal(err)
}
}
log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap))
// Use an explicit request multiplexer.
mux := http.NewServeMux()
mux.Handle("/", d)
server := http.Server{
Addr: *addr,
Handler: mux,
}
shutdown := make(chan struct{})
go func() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
// Wait to receive a message on the channel.
<-sigs
err := server.Shutdown(context.Background())
if err != nil {
log.Printf("Error shutting down server, %v.\n", err)
}
close(shutdown)
}()
log.Println("Starting server.")
err = server.ListenAndServe()
if err != http.ErrServerClosed {
log.Fatalf("Fatal server error, %v.\n", err)
}
<-shutdown
log.Println("Server stopped.")
}
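// Example invocation (mapping file names are placeholders; their format is whatever processLine expects):
//   PERMANENTDETOUR_ADDRESS=":8877" permanentdetour -primo ocul-qu -vid "01OCUL_QU:QU_DEFAULT" mappings-1.txt mappings-2.txt
// Flags left unset fall back to the PERMANENTDETOUR_* environment variables listed in the usage text.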
// processFile takes a file path, opens the file, and reads it line by line to extract id mappings.
func processFile(m map[uint32]uint64, mappingFilePath string) error {
// Get the absolute path of the file. Not | // EnvPrefix is the prefix for the environment variables.
EnvPrefix string = "PERMANENTDETOUR_"
| random_line_split |
|
main.go | vid parameter on all redirects.
setParamInURL(redirectTo, "vid", d.vid)
// Send the redirect to the client.
// http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently)
http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect)
}
// buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID.
func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) {
q := r.URL.Query()
// bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32)
bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32)
if err == nil {
bibID := uint32(bibID64)
exlID, present := idMap[bibID]
if present {
redirectTo.Path = "/discovery/fulldisplay"
setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID))
} else {
log.Printf("Not found: %v", bibID64)
}
} else {
// Log malformed bibId values instead of exiting; the request falls through to the default search redirect.
log.Printf("Could not parse bibId %q: %v", q.Get("bibId"), err)
}
}
// SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME"
// SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL"
// SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T"
// SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL"
// buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search.
func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {
q := r.URL.Query()
setParamInURL(redirectTo, "tab", "Everything")
setParamInURL(redirectTo, "search_scope", "MyInst_and_CI")
if q.Get("searchArg") != "" {
switch q.Get("searchCode") {
case "TKEY^":
setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "TALL":
setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "NAME":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "author")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "CALL":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "callnumber.0")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "JALL":
redirectTo.Path = "/discovery/jsearch"
setParamInURL(redirectTo, "tab", "jsearch_slot")
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
default:
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
}
} else if q.Get("SEARCH") != "" {
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH")))
}
}
func main() {
// Define the command line flags.
addr := flag.String("address", DefaultAddress, "Address to bind on.")
subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.")
vid := flag.String("vid", instVID, "VID parameter for Primo.")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n")
fmt.Fprintf(os.Stderr, "Version %v\n", version)
fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] [file...]\n")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:")
flag.VisitAll(func(f *flag.Flag) {
uppercaseName := strings.ToUpper(f.Name)
fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName)
})
}
// Process the flags.
flag.Parse()
// If any flags have not been set, see if there are
// environment variables that set them.
err := overrideUnsetFlagsFromEnvironmentVariables()
if err != nil {
log.Fatalln(err)
}
// The Detourer has all the data needed to build redirects.
d := Detourer{
primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain),
vid: *vid,
}
// Map of BibIDs to ExL IDs
// The initial size is an estimate based on the number of arguments.
size := uint64(len(flag.Args())) * MaxMappingFileLength
d.idMap = make(map[uint32]uint64, size)
// Process each file in the arguments list.
for _, mappingFilePath := range flag.Args() {
// Add the mappings from this file to the idMap.
err := processFile(d.idMap, mappingFilePath)
if err != nil {
log.Fatal(err)
}
}
log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap))
// Use an explicit request multiplexer.
mux := http.NewServeMux()
mux.Handle("/", d)
server := http.Server{
Addr: *addr,
Handler: mux,
}
shutdown := make(chan struct{})
go func() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
// Wait to receive a message on the channel.
<-sigs
err := server.Shutdown(context.Background())
if err != nil {
log.Printf("Error shutting down server, %v.\n", err)
}
close(shutdown)
}()
log.Println("Starting server.")
err = server.ListenAndServe()
if err != http.ErrServerClosed {
log.Fatalf("Fatal server error, %v.\n", err)
}
<-shutdown
log.Println("Server stopped.")
}
// processFile takes a file path, opens the file, and reads it line by line to extract id mappings.
func processFile(m map[uint32]uint64, mappingFilePath string) error {
// Get the absolute path of the file. Not strictly necessary, but creates clearer error messages.
absFilePath, err := filepath.Abs(mappingFilePath)
if err != nil {
return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err)
}
// Open the file for reading. Close the file automatically when done.
file, err := os.Open(absFilePath)
if err != nil {
return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err)
}
defer file.Close()
// Read the file line by line.
scanner := bufio.NewScanner(file)
lnum := 0
for scanner.Scan() {
lnum += 1
bibID, exlID, err := processLine(scanner.Text())
if err != nil {
return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err)
}
_, present := m[bibID]
if present {
return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID)
}
m[bibID] = exlID
}
err = scanner.Err()
if err != nil {
return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err)
}
return nil
}
// processLine takes a line of input, and finds the bibID and the exL ID.
func processLine(line string) (bibID uint32, exlID uint64, _ error) | {
// Split the input line into fields on commas.
splitLine := strings.Split(line, ",")
if len(splitLine) < 2 {
return bibID, exlID, fmt.Errorf("Line has incorrect number of fields, 2 expected, %v found.\n", len(splitLine))
}
// The bibIDs look like this: a1234-instid
// We need to strip off the first character and anything after the dash.
dashIndex := strings.Index(splitLine[1], "-")
if (dashIndex == 0) || (dashIndex == 1) {
return bibID, exlID, fmt.Errorf("No bibID number was found before dash between bibID and institution id.\n")
}
bibIDString := "invalid"
// If the dash isn't found, use the whole bibID field except the first character.
if dashIndex == -1 {
bibIDString = splitLine[1]
} else {
bibIDString = splitLine[1][0:dashIndex]
}
bibID64, err := strconv.ParseUint(bibIDString, 10, 32) | identifier_body |
|
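A hedged aside, not part of the dataset rows: a standalone rendition of the comma-separated mapping-line parsing that processLine performs above. The function name parseMapping and the sample line "990001234,1234-01ABC" are invented for illustration.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseMapping extracts the numeric bib ID (second field, up to an optional
// dash) and the Ex Libris ID (first field) from one mapping line.
func parseMapping(line string) (uint32, uint64, error) {
    fields := strings.Split(line, ",")
    if len(fields) < 2 {
        return 0, 0, fmt.Errorf("expected 2 fields, found %v", len(fields))
    }
    bibField := fields[1]
    if dash := strings.Index(bibField, "-"); dash > 1 {
        bibField = bibField[:dash] // keep only the digits before the dash
    }
    bib, err := strconv.ParseUint(bibField, 10, 32)
    if err != nil {
        return 0, 0, err
    }
    exl, err := strconv.ParseUint(fields[0], 10, 64)
    if err != nil {
        return 0, 0, err
    }
    return uint32(bib), exl, nil
}

func main() {
    bib, exl, err := parseMapping("990001234,1234-01ABC")
    fmt.Println(bib, exl, err)
}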
main.go | ParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "NAME":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "author")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "CALL":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "callnumber.0")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "JALL":
redirectTo.Path = "/discovery/jsearch"
setParamInURL(redirectTo, "tab", "jsearch_slot")
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
default:
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
}
} else if q.Get("SEARCH") != "" {
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH")))
}
}
func main() {
// Define the command line flags.
addr := flag.String("address", DefaultAddress, "Address to bind on.")
subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.")
vid := flag.String("vid", instVID, "VID parameter for Primo.")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n")
fmt.Fprintf(os.Stderr, "Version %v\n", version)
fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] [file...]\n")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:")
flag.VisitAll(func(f *flag.Flag) {
uppercaseName := strings.ToUpper(f.Name)
fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName)
})
}
// Process the flags.
flag.Parse()
// If any flags have not been set, see if there are
// environment variables that set them.
err := overrideUnsetFlagsFromEnvironmentVariables()
if err != nil {
log.Fatalln(err)
}
// The Detourer has all the data needed to build redirects.
d := Detourer{
primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain),
vid: *vid,
}
// Map of BibIDs to ExL IDs
// The initial size is an estimate based on the number of arguments.
size := uint64(len(flag.Args())) * MaxMappingFileLength
d.idMap = make(map[uint32]uint64, size)
// Process each file in the arguments list.
for _, mappingFilePath := range flag.Args() {
// Add the mappings from this file to the idMap.
err := processFile(d.idMap, mappingFilePath)
if err != nil {
log.Fatal(err)
}
}
log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap))
// Use an explicit request multiplexer.
mux := http.NewServeMux()
mux.Handle("/", d)
server := http.Server{
Addr: *addr,
Handler: mux,
}
shutdown := make(chan struct{})
go func() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
// Wait to receive a message on the channel.
<-sigs
err := server.Shutdown(context.Background())
if err != nil {
log.Printf("Error shutting down server, %v.\n", err)
}
close(shutdown)
}()
log.Println("Starting server.")
err = server.ListenAndServe()
if err != http.ErrServerClosed {
log.Fatalf("Fatal server error, %v.\n", err)
}
<-shutdown
log.Println("Server stopped.")
}
// processFile takes a file path, opens the file, and reads it line by line to extract id mappings.
func processFile(m map[uint32]uint64, mappingFilePath string) error {
// Get the absolute path of the file. Not strictly necessary, but creates clearer error messages.
absFilePath, err := filepath.Abs(mappingFilePath)
if err != nil {
return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err)
}
// Open the file for reading. Close the file automatically when done.
file, err := os.Open(absFilePath)
if err != nil {
return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err)
}
defer file.Close()
// Read the file line by line.
scanner := bufio.NewScanner(file)
lnum := 0
for scanner.Scan() {
lnum += 1
bibID, exlID, err := processLine(scanner.Text())
if err != nil {
return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err)
}
_, present := m[bibID]
if present {
return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID)
}
m[bibID] = exlID
}
err = scanner.Err()
if err != nil {
return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err)
}
return nil
}
// processLine takes a line of input, and finds the bibID and the exL ID.
func processLine(line string) (bibID uint32, exlID uint64, _ error) {
// Split the input line into fields on commas.
splitLine := strings.Split(line, ",")
if len(splitLine) < 2 {
return bibID, exlID, fmt.Errorf("Line has incorrect number of fields, 2 expected, %v found.\n", len(splitLine))
}
// The bibIDs look like this: a1234-instid
// We need to strip off the first character and anything after the dash.
dashIndex := strings.Index(splitLine[1], "-")
if (dashIndex == 0) || (dashIndex == 1) {
return bibID, exlID, fmt.Errorf("No bibID number was found before dash between bibID and institution id.\n")
}
bibIDString := "invalid"
// If the dash isn't found, use the whole bibID field except the first character.
if dashIndex == -1 {
bibIDString = splitLine[1]
} else {
bibIDString = splitLine[1][0:dashIndex]
}
bibID64, err := strconv.ParseUint(bibIDString, 10, 32)
if err != nil {
return bibID, exlID, err
}
bibID = uint32(bibID64)
exlID, err = strconv.ParseUint(splitLine[0], 10, 64)
if err != nil {
return bibID, exlID, err
}
return bibID, exlID, nil
}
// If any flags are not set, use environment variables to set them.
func overrideUnsetFlagsFromEnvironmentVariables() error {
// A map of pointers to unset flags.
listOfUnsetFlags := make(map[*flag.Flag]bool)
// flag.Visit calls a function on "only those flags that have been set."
// flag.VisitAll calls a function on "all flags, even those not set."
// No way to ask for "only unset flags". So, we add all, then
// delete the set flags.
// First, visit all the flags, and add them to our map.
flag.VisitAll(func(f *flag.Flag) { listOfUnsetFlags[f] = true })
// Then delete the set flags.
flag.Visit(func(f *flag.Flag) { delete(listOfUnsetFlags, f) })
// Loop through our list of unset flags.
// We don't care about the values in our map, only the keys.
for k := range listOfUnsetFlags {
// Build the corresponding environment variable name for each flag.
uppercaseName := strings.ToUpper(k.Name)
environmentVariableName := fmt.Sprintf("%v%v", EnvPrefix, uppercaseName)
// Look for the environment variable name.
// If found, set the flag to that value.
// If there's a problem setting the flag value,
// there's a serious problem we can't recover from.
environmentVariableValue := os.Getenv(environmentVariableName)
if environmentVariableValue != "" {
err := k.Value.Set(environmentVariableValue)
if err != nil {
fmt.Errorf("Unable to set configuration option %v from environment variable %v, "+
"which has a value of \"%v\"",
k.Name, environmentVariableName, environmentVariableValue)
}
}
}
return nil
}
// setParamInURL is a helper function which sets a parameter in the query of a url.
func | setParamInURL | identifier_name |
|
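A hedged aside: the "fill unset flags from environment variables" trick used by overrideUnsetFlagsFromEnvironmentVariables above (flag.VisitAll minus flag.Visit), as a standalone program. The EXAMPLE_ prefix and the -name flag are illustrative assumptions.

package main

import (
    "flag"
    "fmt"
    "os"
    "strings"
)

func main() {
    name := flag.String("name", "default", "value that can also come from EXAMPLE_NAME")
    flag.Parse()

    // Collect every defined flag, then remove the ones set on the command line.
    unset := map[*flag.Flag]bool{}
    flag.VisitAll(func(f *flag.Flag) { unset[f] = true })
    flag.Visit(func(f *flag.Flag) { delete(unset, f) })

    // For the remaining flags, fall back to a matching environment variable.
    for f := range unset {
        env := "EXAMPLE_" + strings.ToUpper(f.Name)
        if v := os.Getenv(env); v != "" {
            if err := f.Value.Set(v); err != nil {
                fmt.Fprintf(os.Stderr, "cannot set -%v from %v: %v\n", f.Name, env, err)
                os.Exit(1)
            }
        }
    }
    fmt.Println(*name)
}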
main.go | "/vwebv/search"
)
// A version flag, which should be overwritten when building using ldflags.
var version = "devel"
// Detourer is a struct which stores the data needed to perform redirects.
type Detourer struct {
idMap map[uint32]uint64 // The map of BibIDs to ExL IDs.
primo string // The domain name (host) for the target Primo instance.
vid string // The vid parameter to use when building Primo URLs.
}
// The Detourer serves HTTP redirects based on the request.
func (d Detourer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// In the default case, redirect to the Primo search form.
redirectTo := &url.URL{
Scheme: "https",
Host: d.primo,
Path: "/discovery/search",
}
// Depending on the prefix...
switch {
case strings.HasPrefix(r.URL.Path, RecordPrefix):
buildRecordRedirect(redirectTo, r, d.idMap)
case strings.HasPrefix(r.URL.Path, PatronInfoPrefix):
redirectTo.Path = "/discovery/login"
case strings.HasPrefix(r.URL.Path, PatronInfoPrefix2):
redirectTo.Path = "/discovery/login"
case strings.HasPrefix(r.URL.Path, SearchPrefix):
buildSearchRedirect(redirectTo, r)
}
// Set the vid parameter on all redirects.
setParamInURL(redirectTo, "vid", d.vid)
// Send the redirect to the client.
// http.Redirect(w, r, redirectTo.String(), http.StatusMovedPermanently)
http.Redirect(w, r, redirectTo.String(), http.StatusTemporaryRedirect)
}
// buildRecordRedirect updates redirectTo to the correct Primo record URL for the requested bibID.
func buildRecordRedirect(redirectTo *url.URL, r *http.Request, idMap map[uint32]uint64) {
q := r.URL.Query()
// bibID64, err := strconv.ParseUint(r.URL.Path[len(RecordPrefix):], 10, 32)
bibID64, err := strconv.ParseUint(q.Get("bibId"), 10, 32)
if err == nil | else {
log.Fatalln(err)
}
}
// SearchAuthorIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=NAME"
// SearchCallNumberIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=CALL"
// SearchTitleIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=T"
// SearchJournalIndexPrefix string = "/vwebv/search?searchArg=XXX&searchCode=JALL"
// buildSearchRedirect updates redirectTo to an approximate Primo URL for the requested search.
func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {
q := r.URL.Query()
setParamInURL(redirectTo, "tab", "Everything")
setParamInURL(redirectTo, "search_scope", "MyInst_and_CI")
if q.Get("searchArg") != "" {
switch q.Get("searchCode") {
case "TKEY^":
setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "TALL":
setParamInURL(redirectTo, "query", fmt.Sprintf("title,contains,%v", q.Get("searchArg")))
case "NAME":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "author")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "CALL":
redirectTo.Path = "/discovery/browse"
setParamInURL(redirectTo, "browseScope", "callnumber.0")
setParamInURL(redirectTo, "browseQuery", q.Get("searchArg"))
case "JALL":
redirectTo.Path = "/discovery/jsearch"
setParamInURL(redirectTo, "tab", "jsearch_slot")
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
default:
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("searchArg")))
}
} else if q.Get("SEARCH") != "" {
setParamInURL(redirectTo, "query", fmt.Sprintf("any,contains,%v", q.Get("SEARCH")))
}
}
func main() {
// Define the command line flags.
addr := flag.String("address", DefaultAddress, "Address to bind on.")
subdomain := flag.String("primo", subDomain, "The subdomain of the target Primo instance, ?????.primo.exlibrisgroup.com.")
vid := flag.String("vid", instVID, "VID parameter for Primo.")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Permanent Detour: A tiny web service which redirects Voyager Web OPAC requests to Primo URLs.\n")
fmt.Fprintf(os.Stderr, "Version %v\n", version)
fmt.Fprintf(os.Stderr, "Usage: permanentdetour [flag...] [file...]\n")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, " Environment variables read when flag is unset:")
flag.VisitAll(func(f *flag.Flag) {
uppercaseName := strings.ToUpper(f.Name)
fmt.Fprintf(os.Stderr, " %v%v\n", EnvPrefix, uppercaseName)
})
}
// Process the flags.
flag.Parse()
// If any flags have not been set, see if there are
// environment variables that set them.
err := overrideUnsetFlagsFromEnvironmentVariables()
if err != nil {
log.Fatalln(err)
}
// The Detourer has all the data needed to build redirects.
d := Detourer{
primo: fmt.Sprintf("%v.%v", *subdomain, PrimoDomain),
vid: *vid,
}
// Map of BibIDs to ExL IDs
// The initial size is an estimate based on the number of arguments.
size := uint64(len(flag.Args())) * MaxMappingFileLength
d.idMap = make(map[uint32]uint64, size)
// Process each file in the arguments list.
for _, mappingFilePath := range flag.Args() {
// Add the mappings from this file to the idMap.
err := processFile(d.idMap, mappingFilePath)
if err != nil {
log.Fatal(err)
}
}
log.Printf("%v VGer BibID to Ex Libris ID mappings processed.\n", len(d.idMap))
// Use an explicit request multiplexer.
mux := http.NewServeMux()
mux.Handle("/", d)
server := http.Server{
Addr: *addr,
Handler: mux,
}
shutdown := make(chan struct{})
go func() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
// Wait to receive a message on the channel.
<-sigs
err := server.Shutdown(context.Background())
if err != nil {
log.Printf("Error shutting down server, %v.\n", err)
}
close(shutdown)
}()
log.Println("Starting server.")
err = server.ListenAndServe()
if err != http.ErrServerClosed {
log.Fatalf("Fatal server error, %v.\n", err)
}
<-shutdown
log.Println("Server stopped.")
}
// processFile takes a file path, opens the file, and reads it line by line to extract id mappings.
func processFile(m map[uint32]uint64, mappingFilePath string) error {
// Get the absolute path of the file. Not strictly necessary, but creates clearer error messages.
absFilePath, err := filepath.Abs(mappingFilePath)
if err != nil {
return fmt.Errorf("Could not get absolute path of %v, %v.\n", mappingFilePath, err)
}
// Open the file for reading. Close the file automatically when done.
file, err := os.Open(absFilePath)
if err != nil {
return fmt.Errorf("Could not open %v for reading, %v.\n", absFilePath, err)
}
defer file.Close()
// Read the file line by line.
scanner := bufio.NewScanner(file)
lnum := 0
for scanner.Scan() {
lnum += 1
bibID, exlID, err := processLine(scanner.Text())
if err != nil {
return fmt.Errorf("Unable to process line %v '%v', %v.\n", lnum, scanner.Text(), err)
}
_, present := m[bibID]
if present {
return fmt.Errorf("Previously seen Bib ID %v was encountered.\n", bibID)
}
m[bibID] = exlID
}
err = scanner.Err()
if err != nil {
return fmt.Errorf("Scanner error when processing %v, %v.\n", absFilePath, err)
}
return nil
}
| {
bibID := uint32(bibID64)
exlID, present := idMap[bibID]
if present {
redirectTo.Path = "/discovery/fulldisplay"
setParamInURL(redirectTo, "docid", fmt.Sprintf("alma%v", exlID))
} else {
log.Printf("Not found: %v", bibID64)
}
} | conditional_block |
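A hedged aside: one way to exercise a redirect handler like the Detourer above, using net/http/httptest. The handler body, host, and vid value are stand-ins, not the original implementation.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "net/url"
)

func main() {
    // Stand-in handler that always redirects to a Primo-style search URL.
    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        to := &url.URL{
            Scheme: "https",
            Host:   "example.primo.exlibrisgroup.com",
            Path:   "/discovery/search",
        }
        q := to.Query()
        q.Set("vid", "EXAMPLE_VID")
        to.RawQuery = q.Encode()
        http.Redirect(w, r, to.String(), http.StatusTemporaryRedirect)
    })

    req := httptest.NewRequest("GET", "/vwebv/search?searchArg=cats", nil)
    rec := httptest.NewRecorder()
    h.ServeHTTP(rec, req)

    fmt.Println(rec.Code)                     // 307
    fmt.Println(rec.Header().Get("Location")) // where the client would be sent
}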
import.rs | ST_T: "fstT" = 180;
thm FST_DEF: "FST_DEF" = 181;
term SND: "snd" = 37;
thm SND_T: "sndT" = 184;
thm SND_DEF: "SND_DEF" = 185;
term IND: "ind" = 40;
term ONE_ONE: "one_one" = 38;
thm ONE_ONE_T: "one_one_T" = 188;
thm ONE_ONE_BD: "one_one_BD" = 189;
thm ONE_ONE_DEF: "one_one_DEF" = 191;
term ONTO: "onto" = 39;
thm ONTO_T: "onto_T" = 193;
thm ONTO_BD: "onto_BD" = 194;
thm ONTO_DEF: "onto_DEF" = 196;
thm INFINITY_AX: "inf" = 198;
term IND_SUC: "IND_SUC" = 41;
thm IND_SUC_T: "IND_SUC_T" = 199;
thm IND_SUC_DEF: "IND_SUC_DEF" = 200;
term IND_0: "IND_0" = 42;
thm IND_0_T: "IND_0_T" = 203;
thm IND_0_DEF: "IND_0_DEF" = 204;
term NUM_REP: "NUM_REP" = 43;
thm NUM_REP_T: "NUM_REP_T" = 205;
thm NUM_REP_DEF: "NUM_REP_DEF" = 206;
term NUM: "num" = 44;
thm NUM_THM: "NUM_THM" = 207;
term MK_NUM: "mk_num" = 45;
thm MK_NUM_T: "mk_numT" = 208;
term DEST_NUM: "dest_num" = 46;
thm DEST_NUM_T: "dest_numT" = 209;
thm NUM_BIJ1: "NUM_BIJ1" = 212;
thm NUM_BIJ2: "NUM_BIJ2" = 213;
tydef NUM_TYDEF: (NUM_THM, NUM,
[(MK_NUM, MK_NUM_T), (DEST_NUM, DEST_NUM_T)], [NUM_BIJ1, NUM_BIJ2]) = 1;
term ZERO: "_0" = 47;
thm ZERO_T: "_0T" = 214;
thm ZERO_DEF: "_0_DEF" = 215;
term SUC: "suc" = 48;
thm SUC_T: "sucT" = 216;
thm SUC_BD: "suc_BD" = 218;
thm SUC_DEF: "suc_DEF" = 219;
term NUMERAL: "NUMERAL" = 49;
thm NUMERAL_T: "NUMERAL_T" = 220;
thm NUMERAL_BD: "NUMERAL_BD" = 222;
thm NUMERAL_DEF: "NUMERAL_DEF" = 223;
term BIT0: "bit0" = 51;
thm BIT0_T: "bit0T" = 230;
thm BIT0_DEF: "bit0_DEF" = 232;
term BIT1: "bit1" = 52;
thm BIT1_T: "bit1T" = 233;
thm BIT1_BD: "bit1_BD" = 235;
thm BIT1_DEF: "bit1_DEF" = 236;
term PRE: "pre" = 54;
thm PRE_T: "preT" = 238;
thm PRE_DEF: "pre_DEF" = 239;
thm PRE_SPEC: "PRE" = 240;
term ADD: "add" = 55;
thm ADD_T: "addT" = 241;
thm ADD_DEF: "add_DEF" = 243;
thm ADD_SPEC: "ADD" = 244;
term MUL: "mul" = 57;
thm MUL_T: "mulT" = 245;
thm MUL_DEF: "mul_DEF" = 247;
thm MUL_SPEC: "MUL" = 248;
term EXP: "exp" = 59;
thm EXP_T: "expT" = 250;
thm EXP_DEF: "exp_DEF" = 252;
thm EXP_SPEC: "EXP" = 253;
term LE: "le" = 60;
thm LE_T: "leT" = 254;
thm LE_DEF: "le_DEF" = 256;
thm LE_SPEC: "LE" = 257;
term LT: "lt" = 62;
thm LT_T: "ltT" = 258;
thm LT_DEF: "lt_DEF" = 260;
thm LT_SPEC: "LT" = 261;
term GE: "ge" = 64;
thm GE_T: "geT" = 263;
thm GE_BD: "ge_BD" = 264;
thm GE_DEF: "ge_DEF" = 265;
term GT: "gt" = 65;
thm GT_T: "gtT" = 266;
thm GT_BD: "gt_BD" = 267;
thm GT_DEF: "gt_DEF" = 268;
term EVEN: "even" = 66;
thm EVEN_T: "evenT" = 269;
thm EVEN_DEF: "even_DEF" = 270;
thm EVEN_SPEC: "EVEN" = 271;
term ODD: "odd" = 67;
thm ODD_T: "oddT" = 272;
thm ODD_DEF: "odd_DEF" = 273;
thm ODD_SPEC: "ODD" = 274;
term SUB: "sub" = 68;
thm SUB_T: "subT" = 276;
thm SUB_DEF: "sub_DEF" = 278;
thm SUB_SPEC: "SUB" = 279;
term TYPEDEF: "TYPEDEF" = 70;
thm TYPEDEF_T: "TYPEDEF_T" = 280;
thm TYPEDEF_DEF: "TYPEDEF_DEF" = 281;
thm AND_DEF1: "AND_DEF1" = 102;
thm EXISTS_THM: "EXISTS_THM" = 158;
thm EU_DEF1: "EU_DEF1" = 156;
thm IMP_ANTISYM_AX: "IMP_ANTISYM_AX" = 103;
thm BOOL_CASES_AX: "BOOL_CASES_AX" = 161;
thm TRUTH: "TRUTH" = 53;
thm NOT_TRUE: "NOT_TRUE" = 152;
thm EM: "em" = 159;
thm PAIR_EQ: "PAIR_EQ" = 178;
thm PAIR_SURJ: "PAIR_SURJ" = 179;
thm FST_THM: "FST" = 183;
thm SND_THM: "SND" = 187;
thm IND_SUC_0: "IND_SUC_0" = 201;
thm IND_SUC_INJ: "IND_SUC_INJ" = 202;
thm NOT_SUC: "NOT_SUC" = 225;
thm SUC_INJ: "SUC_INJ" = 226;
thm NUM_CASES: "num_CASES" = 227;
thm NUM_IND: "num_INDUCTION" = 228;
thm NUM_REC: "num_RECURSION" = 229;
thm MUL1: "MUL1" = 249;
thm LE1: "LE1" = 262;
thm ODD1: "ODD1" = 275;
}
pub fn hol_writer(out: PathBuf, temp: PathBuf) -> io::Result<Mm0Writer> {
#[repr(C, align(8))]
pub struct | Aligned | identifier_name |
|
import.rs | "SND_DEF" = 185;
term IND: "ind" = 40;
term ONE_ONE: "one_one" = 38;
thm ONE_ONE_T: "one_one_T" = 188;
thm ONE_ONE_BD: "one_one_BD" = 189;
thm ONE_ONE_DEF: "one_one_DEF" = 191;
term ONTO: "onto" = 39;
thm ONTO_T: "onto_T" = 193;
thm ONTO_BD: "onto_BD" = 194;
thm ONTO_DEF: "onto_DEF" = 196;
thm INFINITY_AX: "inf" = 198;
term IND_SUC: "IND_SUC" = 41;
thm IND_SUC_T: "IND_SUC_T" = 199;
thm IND_SUC_DEF: "IND_SUC_DEF" = 200;
term IND_0: "IND_0" = 42;
thm IND_0_T: "IND_0_T" = 203;
thm IND_0_DEF: "IND_0_DEF" = 204;
term NUM_REP: "NUM_REP" = 43;
thm NUM_REP_T: "NUM_REP_T" = 205;
thm NUM_REP_DEF: "NUM_REP_DEF" = 206;
term NUM: "num" = 44;
thm NUM_THM: "NUM_THM" = 207;
term MK_NUM: "mk_num" = 45;
thm MK_NUM_T: "mk_numT" = 208;
term DEST_NUM: "dest_num" = 46;
thm DEST_NUM_T: "dest_numT" = 209;
thm NUM_BIJ1: "NUM_BIJ1" = 212;
thm NUM_BIJ2: "NUM_BIJ2" = 213;
tydef NUM_TYDEF: (NUM_THM, NUM,
[(MK_NUM, MK_NUM_T), (DEST_NUM, DEST_NUM_T)], [NUM_BIJ1, NUM_BIJ2]) = 1;
term ZERO: "_0" = 47;
thm ZERO_T: "_0T" = 214;
thm ZERO_DEF: "_0_DEF" = 215;
term SUC: "suc" = 48;
thm SUC_T: "sucT" = 216;
thm SUC_BD: "suc_BD" = 218;
thm SUC_DEF: "suc_DEF" = 219;
term NUMERAL: "NUMERAL" = 49;
thm NUMERAL_T: "NUMERAL_T" = 220;
thm NUMERAL_BD: "NUMERAL_BD" = 222;
thm NUMERAL_DEF: "NUMERAL_DEF" = 223;
term BIT0: "bit0" = 51;
thm BIT0_T: "bit0T" = 230;
thm BIT0_DEF: "bit0_DEF" = 232;
term BIT1: "bit1" = 52;
thm BIT1_T: "bit1T" = 233;
thm BIT1_BD: "bit1_BD" = 235;
thm BIT1_DEF: "bit1_DEF" = 236;
term PRE: "pre" = 54;
thm PRE_T: "preT" = 238;
thm PRE_DEF: "pre_DEF" = 239;
thm PRE_SPEC: "PRE" = 240;
term ADD: "add" = 55;
thm ADD_T: "addT" = 241;
thm ADD_DEF: "add_DEF" = 243;
thm ADD_SPEC: "ADD" = 244;
term MUL: "mul" = 57;
thm MUL_T: "mulT" = 245;
thm MUL_DEF: "mul_DEF" = 247;
thm MUL_SPEC: "MUL" = 248;
term EXP: "exp" = 59;
thm EXP_T: "expT" = 250;
thm EXP_DEF: "exp_DEF" = 252;
thm EXP_SPEC: "EXP" = 253;
term LE: "le" = 60;
thm LE_T: "leT" = 254;
thm LE_DEF: "le_DEF" = 256;
thm LE_SPEC: "LE" = 257;
term LT: "lt" = 62;
thm LT_T: "ltT" = 258;
thm LT_DEF: "lt_DEF" = 260;
thm LT_SPEC: "LT" = 261;
term GE: "ge" = 64;
thm GE_T: "geT" = 263;
thm GE_BD: "ge_BD" = 264;
thm GE_DEF: "ge_DEF" = 265;
term GT: "gt" = 65;
thm GT_T: "gtT" = 266;
thm GT_BD: "gt_BD" = 267;
thm GT_DEF: "gt_DEF" = 268;
term EVEN: "even" = 66;
thm EVEN_T: "evenT" = 269;
thm EVEN_DEF: "even_DEF" = 270;
thm EVEN_SPEC: "EVEN" = 271;
term ODD: "odd" = 67;
thm ODD_T: "oddT" = 272;
thm ODD_DEF: "odd_DEF" = 273;
thm ODD_SPEC: "ODD" = 274;
term SUB: "sub" = 68;
thm SUB_T: "subT" = 276;
thm SUB_DEF: "sub_DEF" = 278;
thm SUB_SPEC: "SUB" = 279;
term TYPEDEF: "TYPEDEF" = 70;
thm TYPEDEF_T: "TYPEDEF_T" = 280;
thm TYPEDEF_DEF: "TYPEDEF_DEF" = 281;
thm AND_DEF1: "AND_DEF1" = 102;
thm EXISTS_THM: "EXISTS_THM" = 158;
thm EU_DEF1: "EU_DEF1" = 156;
thm IMP_ANTISYM_AX: "IMP_ANTISYM_AX" = 103;
thm BOOL_CASES_AX: "BOOL_CASES_AX" = 161;
thm TRUTH: "TRUTH" = 53;
thm NOT_TRUE: "NOT_TRUE" = 152;
thm EM: "em" = 159;
thm PAIR_EQ: "PAIR_EQ" = 178;
thm PAIR_SURJ: "PAIR_SURJ" = 179;
thm FST_THM: "FST" = 183;
thm SND_THM: "SND" = 187;
thm IND_SUC_0: "IND_SUC_0" = 201;
thm IND_SUC_INJ: "IND_SUC_INJ" = 202;
thm NOT_SUC: "NOT_SUC" = 225;
thm SUC_INJ: "SUC_INJ" = 226;
thm NUM_CASES: "num_CASES" = 227;
thm NUM_IND: "num_INDUCTION" = 228;
thm NUM_REC: "num_RECURSION" = 229;
thm MUL1: "MUL1" = 249;
thm LE1: "LE1" = 262;
thm ODD1: "ODD1" = 275;
}
pub fn hol_writer(out: PathBuf, temp: PathBuf) -> io::Result<Mm0Writer> {
#[repr(C, align(8))]
pub struct Aligned<T: ?Sized>(T);
static HOL_MMB: &Aligned<[u8]> = &Aligned(*include_bytes!("../../hol.mmb")); | let mmb = MmbFile::parse(&HOL_MMB.0).unwrap();
#[cfg(debug_assertions)] check_consts(&mmb); | random_line_split |
|
main.go | {}
filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error {
fileList = append(fileList, path)
return nil
})
for _, file := range fileList {
collectFiles = append(collectFiles, file)
}
dirFiles := searchFiles("edits", collectFiles)
for stuff := 0; stuff < len(dirFiles); stuff++ {
editName := strings.SplitAfter(dirFiles[stuff], "/edits/")
// fmt.Println(editName)
jj = append(jj, editName[0][:len(editName[0])-7])
if stuff == len(dirFiles)-1 {
encountered := map[string]bool{}
for v := range jj {
if encountered[jj[v]] == true {
} else {
encountered[jj[v]] = true
result = append(result, jj[v])
// Create directories
if o.optype != "ffprobe" {
fmt.Println("directory created")
cmd3 := exec.Command("mkdir", jj[v]+o.outputtype)
cmd3.Run()
}
}
}
}
// Defer the creation of the content until all folders have been created
defer generateContent(o, dirFiles[stuff], editName)
}
// Clear buffers
collectFiles, jj, result = nil, nil, nil
if o.monthlycsv != false {
fmt.Println("generating csv analysis file~~~~~~~")
generateCSV(dirFiles)
}
}
func generateCSV(input []string) {
var matrix [][]string
for i := 0; i < len(input); i++ {
// fmt.Println(input[i] + "s\n")
editName := strings.SplitAfter(input[i], "/edits/")
// fmt.Println(editName)
data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond)
if err != nil {
log.Panicf("Error getting data: %v", err)
}
// MarshalIndent the incoming data, accessible via buf variable (w/ error handling)
buf, err := json.MarshalIndent(data, "", " ")
if err != nil {
log.Panicf("Error unmarshalling: %v", err)
}
// Connect struct to variable
var probed Ffprobe
// Unmarshal buffer into variable defined by the Ffprobe type
if err := json.Unmarshal(buf, &probed); err != nil {
panic(err)
}
// Base filename
ffprobeFilename := probed.Format.Filename
// Clean up the filename
cleanFilename := filepath.Base(ffprobeFilename)
// Unix date for the current file
unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850))
// Split the Unix date by a comma
s := strings.Split(unixdate, ",")
// Title of file
title := cleanFilename[:len(cleanFilename)-4]
// Type of file
typer := cleanFilename[len(cleanFilename)-4:]
// Path of file
path := editName[0]
jo := strings.SplitAfter(input[i], "/edits/")
again := jo[0][:len(jo[0])-7]
splitagain := strings.SplitAfter(again, "/")
again2 := splitagain[len(splitagain)-1]
jj := strings.SplitAfter(again2, "-")
// Folder month
folderMonth := jj[0][:len(jj[0])-1]
// Folder day
folderDay := jj[1][:len(jj[1])-1]
// Folder year
folderYear := jj[2][:len(jj[2])]
// Edit Month
editMonth := s[1][4:7]
// Edit date
editDate := s[1][1:11]
// Edit day (i.e. Monday)
editDay := s[0]
// Edit year
editYear := "20" + s[1][8:11]
// Timestamp of current file
timestamp := s[1][11:19]
// Location of the current file
loc := s[1][20:23]
matrix = append(matrix, []string{
title,
folderMonth,
folderDay,
folderYear,
editMonth,
editDay,
editYear,
editDate[:2],
typer,
path,
timestamp,
loc,
probed.Format.Duration,
probed.Format.Size,
probed.Format.BitRate,
probed.Format.FormatName,
probed.Format.FormatLongName})
}
targetDirectory := strings.SplitAfter(input[0], "/edits/")
fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n")
again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-")
fmt.Println(again)
year := again[0][len(again[0])-5:]
month := again[1]
day := again[2][:len(again[2])-1]
fmt.Println(year + month + day)
combine := year + month + day
root := targetDirectory[0][:len(targetDirectory[0])-18]
csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv"
fmt.Println(csvFile)
file, err := os.Create(csvFile)
if err != nil {
log.Fatal("FAILED TO CREATE CSV", err)
}
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
// Iterate over the FFmpeg matrix
for _, value := range matrix {
// Write data to new .csv file
err := writer.Write(value)
if err != nil {
log.Fatal("FAILED TO WRITE CSV", err)
}
}
fmt.Println("done!")
}
// generateContent
// Define an operation using "case" and pass files from the directory
func generateContent(o operations, dirFiles string, editName []string) {
switch o.optype {
case "png": // create pngs
mp4File := dirFiles
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile)
cmd.Run()
case "gif": // create gifs
mp4File := dirFiles
gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif"
cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile)
cmd.Run()
// Move .gif to gif directory
destinationDirectory := editName[0][:len(editName[0])-7] + "/gif"
cmd2 := exec.Command("mv", gifFile, destinationDirectory)
cmd2.Run()
case "jpg": // create jpgs
fmt.Println("create jpgs")
jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4]
cmd3 := exec.Command("mkdir", jpgFolder)
cmd3.Run()
mp4File := dirFiles
jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg"
defer generateJpgs(mp4File, jpgFrames)
case "histogram1": // create histogram
fmt.Println("create histo1")
histo1Folder := editName[0][:len(editName[0])-7] + "/histogram"
cmd := exec.Command("mkdir", histo1Folder)
cmd.Run()
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt"
defer generateHistogram1(pngFile, histo1File)
}
}
func generateJpgs(mp4File string, jpgFrames string) {
cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames)
cmd5.Run()
}
func generateHistogram1(pngFile string, histo1File string) | {
cmd := exec.Command("convert", pngFile, "-format", "%c", "histogam:info:", histo1File)
cmd.Run()
} | identifier_body |
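A hedged aside: the rows above probe media files through an ffprobe wrapper package; a dependency-free alternative is to shell out to the ffprobe CLI and decode its JSON output with the standard library. The input path is illustrative and ffprobe must be on PATH.

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "os/exec"
)

// probeResult captures only the format fields used by the CSV report above.
type probeResult struct {
    Format struct {
        Filename string `json:"filename"`
        Duration string `json:"duration"`
        Size     string `json:"size"`
        BitRate  string `json:"bit_rate"`
    } `json:"format"`
}

func main() {
    out, err := exec.Command("ffprobe",
        "-v", "quiet",
        "-print_format", "json",
        "-show_format",
        "/tmp/example.mp4").Output()
    if err != nil {
        log.Fatalf("ffprobe failed: %v", err)
    }
    var p probeResult
    if err := json.Unmarshal(out, &p); err != nil {
        log.Fatalf("decode failed: %v", err)
    }
    fmt.Println(p.Format.Filename, p.Format.Duration, p.Format.Size, p.Format.BitRate)
}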
|
main.go | csv != false {
fmt.Println("generating csv analysis file~~~~~~~")
generateCSV(dirFiles)
}
}
func generateCSV(input []string) {
var matrix [][]string
for i := 0; i < len(input); i++ {
// fmt.Println(input[i] + "s\n")
editName := strings.SplitAfter(input[i], "/edits/")
// fmt.Println(editName)
data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond)
if err != nil {
log.Panicf("Error getting data: %v", err)
}
// MarshalIndent the incoming data, accessible via buf variable (w/ error handling)
buf, err := json.MarshalIndent(data, "", " ")
if err != nil {
log.Panicf("Error unmarshalling: %v", err)
}
// Connect struct to variable
var probed Ffprobe
// Unmarshal buffer into variable defined by the Ffprobe type
if err := json.Unmarshal(buf, &probed); err != nil {
panic(err)
}
// Base filename
ffprobeFilename := probed.Format.Filename
// Clean up the filename
cleanFilename := filepath.Base(ffprobeFilename)
// Unix date for the current file
unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850))
// Split the Unix date by a comma
s := strings.Split(unixdate, ",")
// Title of file
title := cleanFilename[:len(cleanFilename)-4]
// Type of file
typer := cleanFilename[len(cleanFilename)-4:]
// Path of file
path := editName[0]
jo := strings.SplitAfter(input[i], "/edits/")
again := jo[0][:len(jo[0])-7]
splitagain := strings.SplitAfter(again, "/")
again2 := splitagain[len(splitagain)-1]
jj := strings.SplitAfter(again2, "-")
// Folder month
folderMonth := jj[0][:len(jj[0])-1]
// Folder day
folderDay := jj[1][:len(jj[1])-1]
// Folder year
folderYear := jj[2][:len(jj[2])]
// Edit Month
editMonth := s[1][4:7]
// Edit date
editDate := s[1][1:11]
// Edit day (i.e. Monday)
editDay := s[0]
// Edit year
editYear := "20" + s[1][8:11]
// Timestamp of current file
timestamp := s[1][11:19]
// Location of the current file
loc := s[1][20:23]
matrix = append(matrix, []string{
title,
folderMonth,
folderDay,
folderYear,
editMonth,
editDay,
editYear,
editDate[:2],
typer,
path,
timestamp,
loc,
probed.Format.Duration,
probed.Format.Size,
probed.Format.BitRate,
probed.Format.FormatName,
probed.Format.FormatLongName})
}
targetDirectory := strings.SplitAfter(input[0], "/edits/")
fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n")
again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-")
fmt.Println(again)
year := again[0][len(again[0])-5:]
month := again[1]
day := again[2][:len(again[2])-1]
fmt.Println(year + month + day)
combine := year + month + day
root := targetDirectory[0][:len(targetDirectory[0])-18]
csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv"
fmt.Println(csvFile)
file, err := os.Create(csvFile)
if err != nil {
log.Fatal("FAILED TO CREATE CSV", err)
}
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
// Iterate over the FFmpeg matrix
for _, value := range matrix {
// Write data to new .csv file
err := writer.Write(value)
if err != nil {
log.Fatal("FAILED TO WRITE CSV", err)
}
}
fmt.Println("done!")
}
// generateContent
// Define an operation using "case" and pass files from the directory
func generateContent(o operations, dirFiles string, editName []string) {
switch o.optype {
case "png": // create pngs
mp4File := dirFiles
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile)
cmd.Run()
case "gif": // create gifs
mp4File := dirFiles
gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif"
cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile)
cmd.Run()
// Move .gif to gif directory
destinationDirectory := editName[0][:len(editName[0])-7] + "/gif"
cmd2 := exec.Command("mv", gifFile, destinationDirectory)
cmd2.Run()
case "jpg": // create jpgs
fmt.Println("create jpgs")
jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4]
cmd3 := exec.Command("mkdir", jpgFolder)
cmd3.Run()
mp4File := dirFiles
jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg"
defer generateJpgs(mp4File, jpgFrames)
case "histogram1": // create histogram
fmt.Println("create histo1")
histo1Folder := editName[0][:len(editName[0])-7] + "/histogram"
cmd := exec.Command("mkdir", histo1Folder)
cmd.Run()
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt"
defer generateHistogram1(pngFile, histo1File)
}
}
func generateJpgs(mp4File string, jpgFrames string) {
cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames)
cmd5.Run()
}
func generateHistogram1(pngFile string, histo1File string) {
cmd := exec.Command("convert", pngFile, "-format", "%c", "histogam:info:", histo1File)
cmd.Run()
}
func main() {
globalDir := "/Users/csk/Documents/_REPO/1987-06-may/"
runSuite(globalDir)
}
func runSuite(globalDir string) {
var br operations
// // PNG test
// br.directory = globalDir
// br.outputtype = "/png"
// br.optype = "png"
// var i1 BroadcastResearch = br
// i1.DefineTargets()
// // GIF test
// br.directory = globalDir
// br.outputtype = "/gif"
// br.optype = "gif"
// var i2 BroadcastResearch = br
// i2.DefineTargets()
// // JPG test
// br.directory = globalDir
// br.outputtype = "/jpg"
// br.optype = "jpg"
// var i3 BroadcastResearch = br
// i3.DefineTargets()
// // HISTOGRAM-1 test
// br.directory = globalDir
// br.outputtype = "/histogram"
// br.optype = "histogram1"
// var i4 BroadcastResearch = br
// i4.DefineTargets()
// PNG test
br.directory = globalDir
br.outputtype = "/ffprobe"
br.optype = "ffprobe"
br.monthlycsv = true
var i5 BroadcastResearch = br
i5.DefineTargets()
}
func | searchFiles | identifier_name |
|
main.go | var collectFiles, jj, result []string
fileList := []string{}
filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error {
fileList = append(fileList, path)
return nil
})
for _, file := range fileList |
dirFiles := searchFiles("edits", collectFiles)
for stuff := 0; stuff < len(dirFiles); stuff++ {
editName := strings.SplitAfter(dirFiles[stuff], "/edits/")
// fmt.Println(editName)
jj = append(jj, editName[0][:len(editName[0])-7])
if stuff == len(dirFiles)-1 {
encountered := map[string]bool{}
for v := range jj {
if encountered[jj[v]] == true {
} else {
encountered[jj[v]] = true
result = append(result, jj[v])
// Create directories
if o.optype != "ffprobe" {
fmt.Println("directory created")
cmd3 := exec.Command("mkdir", jj[v]+o.outputtype)
cmd3.Run()
}
}
}
}
// Defer the creation of the content until all folders have been created
defer generateContent(o, dirFiles[stuff], editName)
}
// Clear buffers
collectFiles, jj, result = nil, nil, nil
if o.monthlycsv != false {
fmt.Println("generating csv analysis file~~~~~~~")
generateCSV(dirFiles)
}
}
func generateCSV(input []string) {
var matrix [][]string
for i := 0; i < len(input); i++ {
// fmt.Println(input[i] + "s\n")
editName := strings.SplitAfter(input[i], "/edits/")
// fmt.Println(editName)
data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond)
if err != nil {
log.Panicf("Error getting data: %v", err)
}
// MarshalIndent the incoming data, accessible via buf variable (w/ error handling)
buf, err := json.MarshalIndent(data, "", " ")
if err != nil {
log.Panicf("Error unmarshalling: %v", err)
}
// Connect struct to variable
var probed Ffprobe
// Unmarshal buffer into variable defined by the Ffprobe type
if err := json.Unmarshal(buf, &probed); err != nil {
panic(err)
}
// Base filename
ffprobeFilename := probed.Format.Filename
// Clean up the filename
cleanFilename := filepath.Base(ffprobeFilename)
// Unix date for the current file
unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850))
// Split the Unix date by a comma
s := strings.Split(unixdate, ",")
// Title of file
title := cleanFilename[:len(cleanFilename)-4]
// Type of file
typer := cleanFilename[len(cleanFilename)-4:]
// Path of file
path := editName[0]
jo := strings.SplitAfter(input[i], "/edits/")
again := jo[0][:len(jo[0])-7]
splitagain := strings.SplitAfter(again, "/")
again2 := splitagain[len(splitagain)-1]
jj := strings.SplitAfter(again2, "-")
// Folder month
folderMonth := jj[0][:len(jj[0])-1]
// Folder day
folderDay := jj[1][:len(jj[1])-1]
// Folder year
folderYear := jj[2][:len(jj[2])]
// Edit Month
editMonth := s[1][4:7]
// Edit date
editDate := s[1][1:11]
// Edit day (i.e. Monday)
editDay := s[0]
// Edit year
editYear := "20" + s[1][8:11]
// Timestamp of current file
timestamp := s[1][11:19]
// Location of the current file
loc := s[1][20:23]
matrix = append(matrix, []string{
title,
folderMonth,
folderDay,
folderYear,
editMonth,
editDay,
editYear,
editDate[:2],
typer,
path,
timestamp,
loc,
probed.Format.Duration,
probed.Format.Size,
probed.Format.BitRate,
probed.Format.FormatName,
probed.Format.FormatLongName})
}
targetDirectory := strings.SplitAfter(input[0], "/edits/")
fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n")
again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-")
fmt.Println(again)
year := again[0][len(again[0])-5:]
month := again[1]
day := again[2][:len(again[2])-1]
fmt.Println(year + month + day)
combine := year + month + day
root := targetDirectory[0][:len(targetDirectory[0])-18]
csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv"
fmt.Println(csvFile)
file, err := os.Create(csvFile)
if err != nil {
log.Fatal("FAILED TO CREATE CSV", err)
}
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
// Iterate over the FFmpeg matrix
for _, value := range matrix {
// Write data to new .csv file
err := writer.Write(value)
if err != nil {
log.Fatal("FAILED TO WRITE CSV", err)
}
}
fmt.Println("done!")
}
// generateContent
// Define an operation using "case" and pass files from the directory
func generateContent(o operations, dirFiles string, editName []string) {
switch o.optype {
case "png": // create pngs
mp4File := dirFiles
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile)
cmd.Run()
case "gif": // create gifs
mp4File := dirFiles
gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif"
cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile)
cmd.Run()
// Move .gif to gif directory
destinationDirectory := editName[0][:len(editName[0])-7] + "/gif"
cmd2 := exec.Command("mv", gifFile, destinationDirectory)
cmd2.Run()
case "jpg": // create jpgs
fmt.Println("create jpgs")
jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4]
cmd3 := exec.Command("mkdir", jpgFolder)
cmd3.Run()
mp4File := dirFiles
jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg"
defer generateJpgs(mp4File, jpgFrames)
case "histogram1": // create histogram
fmt.Println("create histo1")
histo1Folder := editName[0][:len(editName[0])-7] + "/histogram"
cmd := exec.Command("mkdir", histo1Folder)
cmd.Run()
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt"
defer generateHistogram1(pngFile, histo1File)
}
}
func generateJpgs(mp4File string, jpgFrames string) {
cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames)
cmd5.Run()
}
func generateHistogram1(pngFile string, histo1File string) {
cmd := exec.Command("convert", pngFile, "-format", "%c | {
collectFiles = append(collectFiles, file)
} | conditional_block |
main.go | {
var collectFiles, jj, result []string
| return nil
})
for _, file := range fileList {
collectFiles = append(collectFiles, file)
}
dirFiles := searchFiles("edits", collectFiles)
for stuff := 0; stuff < len(dirFiles); stuff++ {
editName := strings.SplitAfter(dirFiles[stuff], "/edits/")
// fmt.Println(editName)
jj = append(jj, editName[0][:len(editName[0])-7])
if stuff == len(dirFiles)-1 {
encountered := map[string]bool{}
for v := range jj {
if encountered[jj[v]] == true {
} else {
encountered[jj[v]] = true
result = append(result, jj[v])
// Create directories
if o.optype != "ffprobe" {
fmt.Println("directory created")
cmd3 := exec.Command("mkdir", jj[v]+o.outputtype)
cmd3.Run()
}
}
}
}
// Defer the creation of the content until all folders have been created
defer generateContent(o, dirFiles[stuff], editName)
}
// Clear buffers
collectFiles, jj, result = nil, nil, nil
if o.monthlycsv != false {
fmt.Println("generating csv analysis file~~~~~~~")
generateCSV(dirFiles)
}
}
func generateCSV(input []string) {
var matrix [][]string
for i := 0; i < len(input); i++ {
// fmt.Println(input[i] + "s\n")
editName := strings.SplitAfter(input[i], "/edits/")
// fmt.Println(editName)
data, err := ffprobe.GetProbeData(input[i], 5000*time.Millisecond)
if err != nil {
log.Panicf("Error getting data: %v", err)
}
// MarshalIndent the incoming data, accessible via buf variable (w/ error handling)
buf, err := json.MarshalIndent(data, "", " ")
if err != nil {
log.Panicf("Error unmarshalling: %v", err)
}
// Connect struct to variable
var probed Ffprobe
// Unmarshal buffer into variable defined by the Ffprobe type
if err := json.Unmarshal(buf, &probed); err != nil {
panic(err)
}
// Base filename
ffprobeFilename := probed.Format.Filename
// Clean up the filename
cleanFilename := filepath.Base(ffprobeFilename)
// Unix date for the current file
unixdate := string(probed.Format.Tags.CreationTime.Format(time.RFC850))
// Split the Unix date by a comma
s := strings.Split(unixdate, ",")
// Title of file
title := cleanFilename[:len(cleanFilename)-4]
// Type of file
typer := cleanFilename[len(cleanFilename)-4:]
// Path of file
path := editName[0]
jo := strings.SplitAfter(input[i], "/edits/")
again := jo[0][:len(jo[0])-7]
splitagain := strings.SplitAfter(again, "/")
again2 := splitagain[len(splitagain)-1]
jj := strings.SplitAfter(again2, "-")
// Folder month
folderMonth := jj[0][:len(jj[0])-1]
// Folder day
folderDay := jj[1][:len(jj[1])-1]
// Folder year
folderYear := jj[2][:len(jj[2])]
// Edit Month
editMonth := s[1][4:7]
// Edit date
editDate := s[1][1:11]
// Edit day (i.e. Monday)
editDay := s[0]
// Edit year
editYear := "20" + s[1][8:11]
// Timestamp of current file
timestamp := s[1][11:19]
// Location of the current file
loc := s[1][20:23]
matrix = append(matrix, []string{
title,
folderMonth,
folderDay,
folderYear,
editMonth,
editDay,
editYear,
editDate[:2],
typer,
path,
timestamp,
loc,
probed.Format.Duration,
probed.Format.Size,
probed.Format.BitRate,
probed.Format.FormatName,
probed.Format.FormatLongName})
}
targetDirectory := strings.SplitAfter(input[0], "/edits/")
fmt.Println(targetDirectory[0][:len(targetDirectory[0])-17] + "\n")
again := strings.SplitAfter(targetDirectory[0][:len(targetDirectory[0])-17], "-")
fmt.Println(again)
year := again[0][len(again[0])-5:]
month := again[1]
day := again[2][:len(again[2])-1]
fmt.Println(year + month + day)
combine := year + month + day
root := targetDirectory[0][:len(targetDirectory[0])-18]
csvFile := root + "/" + combine + "-FFProbeAnalysis-V4.csv"
fmt.Println(csvFile)
file, err := os.Create(csvFile)
if err != nil {
log.Fatal("FAILED TO CREATE CSV", err)
}
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
// Iterate over the FFmpeg matrix
for _, value := range matrix {
// Write data to new .csv file
err := writer.Write(value)
if err != nil {
log.Fatal("FAILED TO WRITE CSV", err)
}
}
fmt.Println("done!")
}
// generateContent
// Define an operation using "case" and pass files from the directory
func generateContent(o operations, dirFiles string, editName []string) {
switch o.optype {
case "png": // create pngs
mp4File := dirFiles
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
cmd := exec.Command("ffmpeg", "-y", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] palettegen", pngFile)
cmd.Run()
case "gif": // create gifs
mp4File := dirFiles
gifFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".gif"
cmd := exec.Command("ffmpeg", "-ss", "0", "-t", "11", "-i", mp4File, "-filter_complex", "[0:v] fps=24,scale=w=480:h=-1,split [a][b];[a] palettegen=stats_mode=single [p];[b][p] paletteuse=new=1", gifFile)
cmd.Run()
// Move .gif to gif directory
destinationDirectory := editName[0][:len(editName[0])-7] + "/gif"
cmd2 := exec.Command("mv", gifFile, destinationDirectory)
cmd2.Run()
case "jpg": // create jpgs
fmt.Println("create jpgs")
jpgFolder := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4]
cmd3 := exec.Command("mkdir", jpgFolder)
cmd3.Run()
mp4File := dirFiles
jpgFrames := editName[0][:len(editName[0])-7] + "/jpg/" + editName[1][:len(editName[1])-4] + "/" + editName[1][:len(editName[1])-4] + "-frame-%04d.jpg"
defer generateJpgs(mp4File, jpgFrames)
case "histogram1": // create histogram
fmt.Println("create histo1")
histo1Folder := editName[0][:len(editName[0])-7] + "/histogram"
cmd := exec.Command("mkdir", histo1Folder)
cmd.Run()
pngFile := editName[0][:len(editName[0])-7] + "/png/" + editName[1][:len(editName[1])-4] + ".png"
histo1File := editName[0][:len(editName[0])-7] + "/histogram/" + editName[1][:len(editName[1])-4] + ".txt"
defer generateHistogram1(pngFile, histo1File)
}
}
func generateJpgs(mp4File string, jpgFrames string) {
cmd5 := exec.Command("ffmpeg", "-n", "-i", mp4File, jpgFrames)
cmd5.Run()
}
func generateHistogram1(pngFile string, histo1File string) {
cmd := exec.Command("convert", pngFile, "-format", "%c", | fileList := []string{}
filepath.Walk(o.directory, func(path string, f os.FileInfo, err error) error {
fileList = append(fileList, path) | random_line_split |
repository.go | {
f, err := os.Open(file)
if err != nil {
return reflow.File{}, err
}
defer f.Close()
w := reflow.Digester.NewWriter()
n, err := io.Copy(w, f)
if err != nil {
return reflow.File{}, err
}
d := w.Digest()
return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file)
}
// InstallDigest installs a file at the given digest. The caller guarantees
// that the file's bytes have the digest d.
func (r *Repository) InstallDigest(d digest.Digest, file string) error {
file, err := filepath.EvalSymlinks(file)
if err != nil {
return err
}
dir, path := r.Path(d)
if err := os.MkdirAll(dir, 0777); err != nil {
return err
}
err = os.Link(file, path)
if os.IsExist(err) {
err = nil
}
if err != nil {
// Copy if file was reported to be on a different device.
if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV {
f, ferr := os.Open(file)
if ferr != nil {
return ferr
}
defer func() { _ = f.Close() }()
_, err = r.Put(context.Background(), f)
}
}
return err
}
// Stat retrieves metadata for files stored in the repository.
func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) {
_, path := r.Path(id)
info, err := os.Stat(path)
if err != nil {
return reflow.File{}, errors.E("stat", r.Root, id, err)
}
return reflow.File{ID: id, Size: info.Size()}, nil
}
// Get retrieves the object named by a digest.
func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) {
_, path := r.Path(id)
rc, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
return rc, nil
}
// Remove removes an object from the repository.
func (r *Repository) Remove(id digest.Digest) error {
_, path := r.Path(id)
return os.Remove(path)
}
// ReadFrom installs an object directly from a foreign repository. If
// the foreign repository supports GetFile, it is used to
// download directly.
//
// GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error)
//
// This is used by implementations like S3, which can download
// multiple chunks at once, and requires an io.WriterAt so that
// chunks needn't be buffered in memory until the download is
// contiguous.
//
// ReadFrom singleflights concurrent downloads from the same key
// regardless of repository origin.
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); ok {
return nil
}
_, err, _ := r.read.Do(id.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
type getFiler interface {
GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error)
}
if gf, ok := repo.(getFiler); ok {
temp, err := r.TempFile("getfile-")
if err != nil {
return nil, err
}
defer os.Remove(temp.Name())
_, err = gf.GetFile(ctx, id, temp)
if err != nil {
_ = temp.Close()
return nil, err
}
err = temp.Close()
if err != nil {
return nil, err
}
f, err := r.Install(temp.Name())
if err != nil {
return nil, err
}
if id != f.ID {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := temp.Stat(); derr == nil {
dstSize = data.Size(dstStat.Size()).String()
}
return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize))
}
return nil, nil
}
rc, err := repo.Get(ctx, id)
if err != nil {
return nil, err
}
defer rc.Close()
id2, err := r.Put(ctx, rc)
if err != nil {
return nil, err
}
if id != id2 {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := r.Stat(ctx, id2); derr == nil {
dstSize = data.Size(dstStat.Size).String()
}
return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize))
}
return nil, nil
})
if err != nil {
r.Log.Debug(err)
}
return err
}
// WriteTo writes an object directly to a foreign repository.
// If the foreign repository supports the PutFile method, it is
// used.
//
// PutFile(context.Context, reflow.File, io.Reader) error
//
// PutFile is useful for implementations like S3 which can upload
// multiple chunks at a time, and requires direct reader (and,
// dynamically, io.Seeker) access.
//
// WriteTo singleflights requests of the same ID and url.
func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); !ok {
return errors.E("writeto", r.Root, id, u.String(), errors.NotExist)
}
_, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
dir, path := r.Path(id)
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
file, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return nil, err
}
type putFiler interface {
PutFile(context.Context, reflow.File, io.Reader) error
}
if pf, ok := repo.(putFiler); ok {
return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file)
}
id2, err := repo.Put(ctx, file)
if err != nil {
return nil, err
}
if id != id2 {
return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2))
} |
// URL returns the url of this repository.
func (r *Repository) URL() *url.URL {
return r.RepoURL
}
// Contains tells whether the repository has an object with a digest.
func (r *Repository) Contains(id digest.Digest) (bool, error) {
_, path := r.Path(id)
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// Put installs an object into the repository. Its digest identity is returned.
func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) {
temp, err := r.TempFile("create-")
if err != nil {
return digest.Digest{}, err
}
defer os.Remove(temp.Name())
dw := reflow.Digester.NewWriter()
done := make(chan error, 1)
// This is a workaround to make sure that copies respect
// context cancellations. Note that the underlying copy is
// not actually cancelled, so this could lead to goroutine
// leaks.
go func() {
_, err = io.Copy(temp, io.TeeReader(body, dw))
temp.Close()
done <- err
}()
select {
case <-ctx.Done():
return digest.Digest{}, ctx.Err()
case err := <-done:
if err != nil {
return digest.Digest{}, err
}
dgst := dw.Digest()
return dgst, r.InstallDigest | return nil, nil
})
return err
} | random_line_split |
repository.go | {
f, err := os.Open(file)
if err != nil {
return reflow.File{}, err
}
defer f.Close()
w := reflow.Digester.NewWriter()
n, err := io.Copy(w, f)
if err != nil {
return reflow.File{}, err
}
d := w.Digest()
return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file)
}
// InstallDigest installs a file at the given digest. The caller guarantees
// that the file's bytes have the digest d.
func (r *Repository) | (d digest.Digest, file string) error {
file, err := filepath.EvalSymlinks(file)
if err != nil {
return err
}
dir, path := r.Path(d)
if err := os.MkdirAll(dir, 0777); err != nil {
return err
}
err = os.Link(file, path)
if os.IsExist(err) {
err = nil
}
if err != nil {
// Copy if file was reported to be on a different device.
if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV {
f, ferr := os.Open(file)
if ferr != nil {
return ferr
}
defer func() { _ = f.Close() }()
_, err = r.Put(context.Background(), f)
}
}
return err
}
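// Editor's illustrative aside, not part of the original file: a usage sketch of
// InstallDigest for a caller that has already hashed a file, mirroring what
// Install does above. It assumes it sits in this package with the same imports;
// the helper name is hypothetical and only exported Repository methods are used.
func installKnownFile(r *Repository, path string) (digest.Digest, error) {
	f, err := os.Open(path)
	if err != nil {
		return digest.Digest{}, err
	}
	defer f.Close()
	w := reflow.Digester.NewWriter()
	if _, err := io.Copy(w, f); err != nil {
		return digest.Digest{}, err
	}
	d := w.Digest()
	// InstallDigest requires that the file's bytes actually hash to d.
	return d, r.InstallDigest(d, path)
}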
// Stat retrieves metadata for files stored in the repository.
func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) {
_, path := r.Path(id)
info, err := os.Stat(path)
if err != nil {
return reflow.File{}, errors.E("stat", r.Root, id, err)
}
return reflow.File{ID: id, Size: info.Size()}, nil
}
// Get retrieves the object named by a digest.
func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) {
_, path := r.Path(id)
rc, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
return rc, nil
}
// Remove removes an object from the repository.
func (r *Repository) Remove(id digest.Digest) error {
_, path := r.Path(id)
return os.Remove(path)
}
// ReadFrom installs an object directly from a foreign repository. If
// the foreign repository supports GetFile, it is used to
// download directly.
//
// GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error)
//
// This is used by implementations like S3, which can download
// multiple chunks at once, and requires an io.WriterAt so that
// chunks needn't be buffered in memory until the download is
// contiguous.
//
// ReadFrom singleflights concurrent downloads from the same key
// regardless of repository origin.
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); ok {
return nil
}
_, err, _ := r.read.Do(id.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
type getFiler interface {
GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error)
}
if gf, ok := repo.(getFiler); ok {
temp, err := r.TempFile("getfile-")
if err != nil {
return nil, err
}
defer os.Remove(temp.Name())
_, err = gf.GetFile(ctx, id, temp)
if err != nil {
_ = temp.Close()
return nil, err
}
err = temp.Close()
if err != nil {
return nil, err
}
f, err := r.Install(temp.Name())
if err != nil {
return nil, err
}
if id != f.ID {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := temp.Stat(); derr == nil {
dstSize = data.Size(dstStat.Size()).String()
}
return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize))
}
return nil, nil
}
rc, err := repo.Get(ctx, id)
if err != nil {
return nil, err
}
defer rc.Close()
id2, err := r.Put(ctx, rc)
if err != nil {
return nil, err
}
if id != id2 {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := r.Stat(ctx, id2); derr == nil {
dstSize = data.Size(dstStat.Size).String()
}
return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize))
}
return nil, nil
})
if err != nil {
r.Log.Debug(err)
}
return err
}
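// Editor's illustrative aside, not part of the original file: a minimal usage
// sketch of ReadFrom. The URL is a placeholder; ReadFrom returns immediately
// when the object is already present locally and singleflights concurrent callers.
func pullExample(ctx context.Context, r *Repository, id digest.Digest) error {
	u, err := url.Parse("s3://example-bucket/repository") // hypothetical source
	if err != nil {
		return err
	}
	return r.ReadFrom(ctx, id, u)
}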
// WriteTo writes an object directly to a foreign repository.
// If the foreign repository supports the PutFile method, it is
// used.
//
// PutFile(context.Context, reflow.File, io.Reader) error
//
// PutFile is useful for implementations like S3 which can upload
// multiple chunks at a time, and requires direct reader (and,
// dynamically, io.Seeker) access.
//
// WriteTo singleflights requests of the same ID and url.
func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); !ok {
return errors.E("writeto", r.Root, id, u.String(), errors.NotExist)
}
_, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
dir, path := r.Path(id)
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
file, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return nil, err
}
type putFiler interface {
PutFile(context.Context, reflow.File, io.Reader) error
}
if pf, ok := repo.(putFiler); ok {
return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file)
}
id2, err := repo.Put(ctx, file)
if err != nil {
return nil, err
}
if id != id2 {
return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2))
}
return nil, nil
})
return err
}
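// Editor's illustrative aside, not part of the original file: WriteTo only
// probes the destination for the method set below, so any repository exposing
// a PutFile with this signature gets the streaming fast path. The names here
// are hypothetical; the real interface is declared inline inside WriteTo.
type examplePutFiler interface {
	PutFile(context.Context, reflow.File, io.Reader) error
}

// hasPutFile reports whether dst would take the PutFile path in WriteTo.
func hasPutFile(dst interface{}) bool {
	_, ok := dst.(examplePutFiler)
	return ok
}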
// URL returns the url of this repository.
func (r *Repository) URL() *url.URL {
return r.RepoURL
}
// Contains tells whether the repository has an object with a digest.
func (r *Repository) Contains(id digest.Digest) (bool, error) {
_, path := r.Path(id)
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// Put installs an object into the repository. Its digest identity is returned.
func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) {
temp, err := r.TempFile("create-")
if err != nil {
return digest.Digest{}, err
}
defer os.Remove(temp.Name())
dw := reflow.Digester.NewWriter()
done := make(chan error, 1)
// This is a workaround to make sure that copies respect
// context cancellations. Note that the underlying copy is
// not actually cancelled, so this could lead to goroutine
// leaks.
go func() {
_, err = io.Copy(temp, io.TeeReader(body, dw))
temp.Close()
done <- err
}()
select {
case <-ctx.Done():
return digest.Digest{}, ctx.Err()
case err := <-done:
if err != nil {
return digest.Digest{}, err
}
dgst := dw.Digest()
return dgst, r.InstallDigest | InstallDigest | identifier_name |
repository.go | int64, error)
}
if gf, ok := repo.(getFiler); ok {
temp, err := r.TempFile("getfile-")
if err != nil {
return nil, err
}
defer os.Remove(temp.Name())
_, err = gf.GetFile(ctx, id, temp)
if err != nil {
_ = temp.Close()
return nil, err
}
err = temp.Close()
if err != nil {
return nil, err
}
f, err := r.Install(temp.Name())
if err != nil {
return nil, err
}
if id != f.ID {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := temp.Stat(); derr == nil {
dstSize = data.Size(dstStat.Size()).String()
}
return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize))
}
return nil, nil
}
rc, err := repo.Get(ctx, id)
if err != nil {
return nil, err
}
defer rc.Close()
id2, err := r.Put(ctx, rc)
if err != nil {
return nil, err
}
if id != id2 {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := r.Stat(ctx, id2); derr == nil {
dstSize = data.Size(dstStat.Size).String()
}
return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize))
}
return nil, nil
})
if err != nil {
r.Log.Debug(err)
}
return err
}
// WriteTo writes an object directly to a foreign repository.
// If the foreign repository supports the PutFile method, it is
// used.
//
// PutFile(context.Context, reflow.File, io.Reader) error
//
// PutFile is useful for implementations like S3 which can upload
// multiple chunks at a time, and requires direct reader (and,
// dynamically, io.Seeker) access.
//
// WriteTo singleflights requests of the same ID and url.
func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); !ok {
return errors.E("writeto", r.Root, id, u.String(), errors.NotExist)
}
_, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
dir, path := r.Path(id)
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
file, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return nil, err
}
type putFiler interface {
PutFile(context.Context, reflow.File, io.Reader) error
}
if pf, ok := repo.(putFiler); ok {
return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file)
}
id2, err := repo.Put(ctx, file)
if err != nil {
return nil, err
}
if id != id2 {
return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2))
}
return nil, nil
})
return err
}
// URL returns the url of this repository.
func (r *Repository) URL() *url.URL {
return r.RepoURL
}
// Contains tells whether the repository has an object with a digest.
func (r *Repository) Contains(id digest.Digest) (bool, error) {
_, path := r.Path(id)
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// Put installs an object into the repository. Its digest identity is returned.
func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) {
temp, err := r.TempFile("create-")
if err != nil {
return digest.Digest{}, err
}
defer os.Remove(temp.Name())
dw := reflow.Digester.NewWriter()
done := make(chan error, 1)
// This is a workaround to make sure that copies respect
// context cancellations. Note that the underlying copy is
// not actually cancelled, so this could lead to goroutine
// leaks.
go func() {
_, err = io.Copy(temp, io.TeeReader(body, dw))
temp.Close()
done <- err
}()
select {
case <-ctx.Done():
return digest.Digest{}, ctx.Err()
case err := <-done:
if err != nil {
return digest.Digest{}, err
}
dgst := dw.Digest()
return dgst, r.InstallDigest(dgst, temp.Name())
}
}
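// Editor's illustrative aside, not part of the original file: a usage sketch of
// Put that stores an in-memory payload and checks it through Contains and Stat.
// It assumes this package's context; bytes and fmt are assumed to be imported.
func putExample(ctx context.Context, r *Repository, payload []byte) error {
	id, err := r.Put(ctx, bytes.NewReader(payload))
	if err != nil {
		return err
	}
	if ok, err := r.Contains(id); err != nil || !ok {
		return fmt.Errorf("object %v not installed: %v", id, err)
	}
	file, err := r.Stat(ctx, id)
	if err != nil {
		return err
	}
	if file.Size != int64(len(payload)) {
		return fmt.Errorf("size mismatch: %d != %d", file.Size, len(payload))
	}
	return nil
}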
// Materialize takes a mapping of path-to-object, and hardlinks the
// corresponding objects from the repository into the given root.
func (r *Repository) Materialize(root string, binds map[string]digest.Digest) error {
dirsMade := map[string]bool{}
for path, id := range binds {
path = filepath.Join(root, path)
dir := filepath.Dir(path)
if !dirsMade[dir] {
if err := os.MkdirAll(dir, 0777); err != nil {
return err
}
// TODO(marius): also insert parents
dirsMade[dir] = true
}
os.Remove(path) // best effort
_, rpath := r.Path(id)
if err := os.Link(rpath, path); err != nil {
// Copy if file was reported to be on a different device.
if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV {
f, ferr := os.Create(path)
if ferr != nil {
return ferr
}
rc, rcerr := r.Get(context.Background(), id)
if rcerr != nil {
return rcerr
}
_, err = io.Copy(f, rc)
}
return err
}
}
return nil
}
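// Editor's illustrative aside, not part of the original file: a usage sketch of
// Materialize that lays out two previously installed objects under a scratch
// root. The paths and digests are hypothetical.
func materializeExample(r *Repository, root string, a, b digest.Digest) error {
	binds := map[string]digest.Digest{
		"inputs/a.txt":        a,
		"inputs/nested/b.bin": b,
	}
	// Each entry is hard-linked under root, or copied when crossing devices.
	return r.Materialize(root, binds)
}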
// Vacuum moves all objects from the given repository to this one.
func (r *Repository) Vacuum(ctx context.Context, repo *Repository) error {
var w walker
w.Init(repo)
var errs errors.Multi
for w.Scan() {
if err := r.InstallDigest(w.Digest(), w.Path()); err != nil {
errs.Add(errors.E("vacuum", w.Digest(), err))
}
}
if err := w.Err(); err != nil {
errs.Add(err)
}
_ = repo.Collect(ctx, nil) // ignore errors
return errs.Combined()
}
// Scan invokes handler for each object in the repository.
func (r *Repository) Scan(ctx context.Context, handler func(digest.Digest) error) error {
var w walker
w.Init(r)
for w.Scan() {
if err := ctx.Err(); err != nil {
return err
}
err := handler(w.Digest())
if err != nil {
return err
}
}
return w.Err()
}
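// Editor's illustrative aside, not part of the original file: a usage sketch of
// Scan that totals the size of every object in the repository.
func totalSize(ctx context.Context, r *Repository) (int64, error) {
	var total int64
	err := r.Scan(ctx, func(id digest.Digest) error {
		file, err := r.Stat(ctx, id)
		if err != nil {
			return err
		}
		total += file.Size
		return nil
	})
	return total, err
}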
// CollectWithThreshold removes from this repository any objects not in the
// Liveset and whose creation times are not more recent than the
// threshold time.
func (r *Repository) CollectWithThreshold(ctx context.Context, live liveset.Liveset, dead liveset.Liveset, threshold time.Time, dryRun bool) error {
return errors.E("collectwiththreshold", errors.NotSupported)
}
// Collect removes any objects in the repository that are not also in
// the live set.
func (r *Repository) Collect(ctx context.Context, live liveset.Liveset) error {
var w walker
w.Init(r)
var (
n int
size int64
)
for w.Scan() | {
if live != nil && live.Contains(w.Digest()) {
continue
}
size += w.Info().Size()
if err := os.Remove(w.Path()); err != nil {
r.Log.Errorf("remove %q: %v", w.Path(), err)
}
// Clean up object subdirectories. (Ignores failure when nonempty.)
os.Remove(filepath.Dir(w.Path()))
n++
} | conditional_block |
|
repository.go | {
f, err := os.Open(file)
if err != nil {
return reflow.File{}, err
}
defer f.Close()
w := reflow.Digester.NewWriter()
n, err := io.Copy(w, f)
if err != nil {
return reflow.File{}, err
}
d := w.Digest()
return reflow.File{ID: d, Size: n}, r.InstallDigest(d, file)
}
// InstallDigest installs a file at the given digest. The caller guarantees
// that the file's bytes have the digest d.
func (r *Repository) InstallDigest(d digest.Digest, file string) error {
file, err := filepath.EvalSymlinks(file)
if err != nil {
return err
}
dir, path := r.Path(d)
if err := os.MkdirAll(dir, 0777); err != nil {
return err
}
err = os.Link(file, path)
if os.IsExist(err) {
err = nil
}
if err != nil {
// Copy if file was reported to be on a different device.
if linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV {
f, ferr := os.Open(file)
if ferr != nil {
return ferr
}
defer func() { _ = f.Close() }()
_, err = r.Put(context.Background(), f)
}
}
return err
}
// Stat retrieves metadata for files stored in the repository.
func (r *Repository) Stat(ctx context.Context, id digest.Digest) (reflow.File, error) {
_, path := r.Path(id)
info, err := os.Stat(path)
if err != nil {
return reflow.File{}, errors.E("stat", r.Root, id, err)
}
return reflow.File{ID: id, Size: info.Size()}, nil
}
// Get retrieves the object named by a digest.
func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) {
_, path := r.Path(id)
rc, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
return rc, nil
}
// Remove removes an object from the repository.
func (r *Repository) Remove(id digest.Digest) error {
_, path := r.Path(id)
return os.Remove(path)
}
// ReadFrom installs an object directly from a foreign repository. If
// the foreign repository supports GetFile, it is used to
// download directly.
//
// GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error)
//
// This is used by implementations like S3, which can download
// multiple chunks at once, and requires an io.WriterAt so that
// chunks needn't be buffered in memory until the download is
// contiguous.
//
// ReadFrom singleflights concurrent downloads from the same key
// regardless of repository origin.
func (r *Repository) ReadFrom(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); ok {
return nil
}
_, err, _ := r.read.Do(id.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
type getFiler interface {
GetFile(ctx context.Context, id digest.Digest, w io.WriterAt) (int64, error)
}
if gf, ok := repo.(getFiler); ok {
temp, err := r.TempFile("getfile-")
if err != nil {
return nil, err
}
defer os.Remove(temp.Name())
_, err = gf.GetFile(ctx, id, temp)
if err != nil {
_ = temp.Close()
return nil, err
}
err = temp.Close()
if err != nil {
return nil, err
}
f, err := r.Install(temp.Name())
if err != nil {
return nil, err
}
if id != f.ID {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := temp.Stat(); derr == nil {
dstSize = data.Size(dstStat.Size()).String()
}
return nil, errors.E("readfrom (GetFile)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, f.ID, dstSize))
}
return nil, nil
}
rc, err := repo.Get(ctx, id)
if err != nil {
return nil, err
}
defer rc.Close()
id2, err := r.Put(ctx, rc)
if err != nil {
return nil, err
}
if id != id2 {
srcSize := "unknown size"
if srcStat, serr := repo.Stat(ctx, id); serr == nil {
srcSize = data.Size(srcStat.Size).String()
}
dstSize := "unknown size"
if dstStat, derr := r.Stat(ctx, id2); derr == nil {
dstSize = data.Size(dstStat.Size).String()
}
return nil, errors.E("readfrom (Get)", u.String(), errors.Integrity, errors.Errorf("%v (%s) != %v (%s)", id, srcSize, id2, dstSize))
}
return nil, nil
})
if err != nil {
r.Log.Debug(err)
}
return err
}
// WriteTo writes an object directly to a foreign repository.
// If the foreign repository supports the PutFile method, it is
// used.
//
// PutFile(context.Context, reflow.File, io.Reader) error
//
// PutFile is useful for implementations like S3 which can upload
// multiple chunks at a time, and requires direct reader (and,
// dynamically, io.Seeker) access.
//
// WriteTo singleflights requests of the same ID and url.
func (r *Repository) WriteTo(ctx context.Context, id digest.Digest, u *url.URL) error {
if ok, _ := r.Contains(id); !ok {
return errors.E("writeto", r.Root, id, u.String(), errors.NotExist)
}
_, err, _ := r.write.Do(id.String()+u.String(), func() (interface{}, error) {
repo, err := repository.Dial(u.String())
if err != nil {
return nil, err
}
dir, path := r.Path(id)
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, err
}
file, err := os.Open(path)
if err != nil {
return nil, errors.E("get", r.Root, id, err)
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return nil, err
}
type putFiler interface {
PutFile(context.Context, reflow.File, io.Reader) error
}
if pf, ok := repo.(putFiler); ok {
return nil, pf.PutFile(ctx, reflow.File{ID: id, Size: info.Size()}, file)
}
id2, err := repo.Put(ctx, file)
if err != nil {
return nil, err
}
if id != id2 {
return nil, errors.E("writeto", r.Root, id, errors.Integrity, errors.Errorf("%v != %v", id, id2))
}
return nil, nil
})
return err
}
// URL returns the url of this repository.
func (r *Repository) URL() *url.URL {
return r.RepoURL
}
// Contains tells whether the repository has an object with a digest.
func (r *Repository) Contains(id digest.Digest) (bool, error) {
_, path := r.Path(id)
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
// Put installs an object into the repository. Its digest identity is returned.
func (r *Repository) Put(ctx context.Context, body io.Reader) (digest.Digest, error) | case err := <-done:
if err != nil {
return digest.Digest{}, err
}
dgst := dw.Digest()
return dgst, r.InstallDigest | {
temp, err := r.TempFile("create-")
if err != nil {
return digest.Digest{}, err
}
defer os.Remove(temp.Name())
dw := reflow.Digester.NewWriter()
done := make(chan error, 1)
// This is a workaround to make sure that copies respect
// context cancellations. Note that the underlying copy is
// not actually cancelled, so this could lead to goroutine
// leaks.
go func() {
_, err = io.Copy(temp, io.TeeReader(body, dw))
temp.Close()
done <- err
}()
select {
case <-ctx.Done():
return digest.Digest{}, ctx.Err() | identifier_body |
pre_train.py |
# # next_sentence_labels = features['next_sentence_labels']
# else:
masked_lm_positions = features['masked_lm_positions']
masked_lm_ids = features['masked_lm_ids']
masked_lm_weights = features['masked_lm_weights']
if bert_config.train_type == 'seq2seq':
_info('Training seq2seq task.')
elif bert_config.train_type == 'lm':
_info('Training language model task.')
# build model
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask)
# compute loss
loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,
model.get_sequence_output(),
model.embedding_table,
model.projection_table,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
mode)
if mode == tf.estimator.ModeKeys.PREDICT:
masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])
output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)
else:
if mode == tf.estimator.ModeKeys.TRAIN:
# restore from the checkpoint,
# tf.estimator automatically restores from the model typically,
# maybe this is here to restore some pre-trained parameters
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
_info('*** Trainable Variables ***')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
_info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))
train_op = optimization.create_optimizer(
loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)
# learning_rate = tf.train.polynomial_decay(bert_config.learning_rate, | # num_train_steps,
# end_learning_rate=0.0,
# power=1.0,
# cycle=False)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
# clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
# train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())
output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)
def metric_fn(loss, label_ids, logits, is_real_example):
"""
Args:
loss: tf.float32.
label_ids: [b, s].
logits: [b, s, v].
"""
# [b * s, v]
logits = tf.reshape(logits, [-1, logits.shape[-1]])
# [b * s, 1]
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# [b * s]
label_ids = tf.reshape(label_ids, [-1])
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=loss)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions,
label_ids, label_weights, mode):
"""Get the loss for the answer according to the mask.
Args:
bert_config: config for bert.
input_tensor: float Tensor of shape [batch_size, seq_length, width].
embedding_table: [vocab_size, embedding_size].
projection_table: [embedding_size, hidden_size].
positions: tf.int32, which saves the positions for answers.
label_ids: tf.int32, which are the true labels.
label_weights: tf.int32, which refers to the padding.
Returns:
loss: average word loss.
per_loss: per word loss.
log_probs: log probability.
"""
predicted_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope('cls/predictions'):
with tf.variable_scope('transform'):
input_tensor = tf.layers.dense(
predicted_tensor,
units=bert_config.hidden_size,
activation=gelu,
kernel_initializer=create_initializer(bert_config.initializer_range))
input_tensor = layer_norm(input_tensor)
output_bias = tf.get_variable(
'output_bias',
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
input_project = tf.matmul(input_tensor, projection_table, transpose_b=True)
logits = tf.matmul(input_project, embedding_table, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# [some_length, vocab_size]
log_probs = tf.nn.log_softmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
return None, None, tf.nn.softmax(logits, axis=-1), logits
# [some_length], no need to cast to tf.float32
label_ids = tf.reshape(label_ids, [-1])
# [some_length]
label_weights = tf.cast(tf.reshape(label_weights, [-1]), dtype=tf.float32)
# [some_length, vocab_size]
one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size)
# [some_length, 1]
per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1)
# ignore padding
numerator = tf.reduce_sum(label_weights * per_loss)
# the number of predicted items
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return loss, per_loss, log_probs, logits
def gather_indexes(input_tensor, positions):
"""Gather all the predicted tensor, input_tensor contains all the positions,
however, only masked positions are used for calculating the loss.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, width].
positions: save the relative positions of each sentence's labels.
Returns:
output_tensor: [some_length, width], where some_length refers to all the predicted labels
in the data batch.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
# create a vector which saves the initial positions for each batch
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
# get the absolute positions for the predicted labels, [batch_size * seq_length, 1]
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_input_tensor = tf.reshape(input_tensor,
[batch_size * seq_length, width])
# obtain the predicted items, [some_length, width]
output_tensor = tf.gather(flat_input_tensor, flat_positions)
return output_tensor
def main():
# tf.gfile.MakeDirs(FLAGS.output_dir)
Path(bert_config.model_dir).mkdir(exist_ok=True)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=bert_config.init_checkpoint,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
input_fn = functools.partial(train_input_fn,
path=bert_config.data_path,
batch_size=bert_config.batch_size,
repeat_num=bert_config.num_train_steps,
max_length = bert_config.max_length,
train_type=bert_config.train_type,
reverse=bert_config.reverse)
run_config = tf.contrib.tpu.RunConfig(
keep_checkpoint_max=1,
save_checkpoints_steps=1000,
model_dir=bert_config.model_dir)
estimator = tf.estimator.Estimator(model_fn, config=run_config)
estimator.train(input_fn)
# train_spec = tf.estimator.TrainSpec(input_fn=input_fn)
# eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000)
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# for evaluation, the repeat_num in input_fn has to be reset
# estimator.evaluate(input_fn)
def package_model(model_path, pb_path):
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=None,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
estimator = tf.estimator.Estimator(model_fn, model_path)
estimator.export_saved_model(pb_path, serving_input_receiver_fn)
if __name__ == '__main__':
main()
# package | # tf.train.get_or_create_global_step(), | random_line_split |
pre_train.py |
# # next_sentence_labels = features['next_sentence_labels']
# else:
masked_lm_positions = features['masked_lm_positions']
masked_lm_ids = features['masked_lm_ids']
masked_lm_weights = features['masked_lm_weights']
if bert_config.train_type == 'seq2seq':
_info('Training seq2seq task.')
elif bert_config.train_type == 'lm':
_info('Training language model task.')
# build model
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask)
# compute loss
loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,
model.get_sequence_output(),
model.embedding_table,
model.projection_table,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
mode)
if mode == tf.estimator.ModeKeys.PREDICT:
masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])
output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)
else:
if mode == tf.estimator.ModeKeys.TRAIN:
# restore from the checkpoint,
# tf.estimator automatically restores from the model typically,
# maybe this is here to restore some pre-trained parameters
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
_info('*** Trainable Variables ***')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
_info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))
train_op = optimization.create_optimizer(
loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)
# learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,
# tf.train.get_or_create_global_step(),
# num_train_steps,
# end_learning_rate=0.0,
# power=1.0,
# cycle=False)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
# clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
# train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())
output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)
def metric_fn(loss, label_ids, logits, is_real_example):
|
eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions,
label_ids, label_weights, mode):
"""Get the loss for the answer according to the mask.
Args:
bert_config: config for bert.
input_tensor: float Tensor of shape [batch_size, seq_length, width].
embedding_table: [vocab_size, embedding_size].
projection_table: [embedding_size, hidden_size].
positions: tf.int32, which saves the positions for answers.
label_ids: tf.int32, which are the true labels.
label_weights: tf.int32, which refers to the padding.
Returns:
loss: average word loss.
per_loss: per word loss.
log_probs: log probability.
"""
predicted_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope('cls/predictions'):
with tf.variable_scope('transform'):
input_tensor = tf.layers.dense(
predicted_tensor,
units=bert_config.hidden_size,
activation=gelu,
kernel_initializer=create_initializer(bert_config.initializer_range))
input_tensor = layer_norm(input_tensor)
output_bias = tf.get_variable(
'output_bias',
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
input_project = tf.matmul(input_tensor, projection_table, transpose_b=True)
logits = tf.matmul(input_project, embedding_table, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# [some_length, vocab_size]
log_probs = tf.nn.log_softmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
return None, None, tf.nn.softmax(logits, axis=-1), logits
# [some_length], no need to cast to tf.float32
label_ids = tf.reshape(label_ids, [-1])
# [some_length]
label_weights = tf.cast(tf.reshape(label_weights, [-1]), dtype=tf.float32)
# [some_length, vocab_size]
one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size)
# [some_length, 1]
per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1)
# ignore padding
numerator = tf.reduce_sum(label_weights * per_loss)
# the number of predicted items
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return loss, per_loss, log_probs, logits
def gather_indexes(input_tensor, positions):
"""Gather all the predicted tensor, input_tensor contains all the positions,
however, only masked positions are used for calculating the loss.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, width].
positions: save the relative positions of each sentence's labels.
Returns:
output_tensor: [some_length, width], where some_length refers to all the predicted labels
in the data batch.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
# create a vector which saves the initial positions for each batch
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
# get the absolute positions for the predicted labels, [batch_size * seq_length, 1]
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_input_tensor = tf.reshape(input_tensor,
[batch_size * seq_length, width])
# obtain the predicted items, [some_length, width]
output_tensor = tf.gather(flat_input_tensor, flat_positions)
return output_tensor
def main():
# tf.gfile.MakeDirs(FLAGS.output_dir)
Path(bert_config.model_dir).mkdir(exist_ok=True)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=bert_config.init_checkpoint,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
input_fn = functools.partial(train_input_fn,
path=bert_config.data_path,
batch_size=bert_config.batch_size,
repeat_num=bert_config.num_train_steps,
max_length = bert_config.max_length,
train_type=bert_config.train_type,
reverse=bert_config.reverse)
run_config = tf.contrib.tpu.RunConfig(
keep_checkpoint_max=1,
save_checkpoints_steps=1000,
model_dir=bert_config.model_dir)
estimator = tf.estimator.Estimator(model_fn, config=run_config)
estimator.train(input_fn)
# train_spec = tf.estimator.TrainSpec(input_fn=input_fn)
# eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000)
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# for evaluation, the repeat_num in input_fn has to be reset
# estimator.evaluate(input_fn)
def package_model(model_path, pb_path):
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=None,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
estimator = tf.estimator.Estimator(model_fn, model_path)
estimator.export_saved_model(pb_path, serving_input_receiver_fn)
if __name__ == '__main__':
main()
# package | """
Args:
loss: tf.float32.
label_ids: [b, s].
logits: [b, s, v].
"""
# [b * s, v]
logits = tf.reshape(logits, [-1, logits.shape[-1]])
# [b * s, 1]
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# [b * s]
label_ids = tf.reshape(label_ids, [-1])
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=loss)
return {'eval_accuracy': accuracy, 'eval_loss': loss} | identifier_body |
pre_train.py | # # next_sentence_labels = features['next_sentence_labels']
# else:
masked_lm_positions = features['masked_lm_positions']
masked_lm_ids = features['masked_lm_ids']
masked_lm_weights = features['masked_lm_weights']
if bert_config.train_type == 'seq2seq':
_info('Training seq2seq task.')
elif bert_config.train_type == 'lm':
_info('Training language model task.')
# build model
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask)
# compute loss
loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,
model.get_sequence_output(),
model.embedding_table,
model.projection_table,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
mode)
if mode == tf.estimator.ModeKeys.PREDICT:
masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])
output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)
else:
if mode == tf.estimator.ModeKeys.TRAIN:
# restore from the checkpoint,
# tf.estimator automatically restores from the model typically,
# maybe this is here to restore some pre-trained parameters
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
_info('*** Trainable Variables ***')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
_info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))
train_op = optimization.create_optimizer(
loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)
# learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,
# tf.train.get_or_create_global_step(),
# num_train_steps,
# end_learning_rate=0.0,
# power=1.0,
# cycle=False)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
# clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
# train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())
output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)
def | (loss, label_ids, logits, is_real_example):
"""
Args:
loss: tf.float32.
label_ids: [b, s].
logits: [b, s, v].
"""
# [b * s, v]
logits = tf.reshape(logits, [-1, logits.shape[-1]])
# [b * s, 1]
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# [b * s]
label_ids = tf.reshape(label_ids, [-1])
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=loss)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions,
label_ids, label_weights, mode):
"""Get the loss for the answer according to the mask.
Args:
bert_config: config for bert.
input_tensor: float Tensor of shape [batch_size, seq_length, width].
embedding_table: [vocab_size, embedding_size].
projection_table: [embedding_size, hidden_size].
positions: tf.int32, which saves the positions for answers.
label_ids: tf.int32, which are the true labels.
label_weights: tf.int32, which refers to the padding.
Returns:
loss: average word loss.
per_loss: per word loss.
log_probs: log probability.
"""
predicted_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope('cls/predictions'):
with tf.variable_scope('transform'):
input_tensor = tf.layers.dense(
predicted_tensor,
units=bert_config.hidden_size,
activation=gelu,
kernel_initializer=create_initializer(bert_config.initializer_range))
input_tensor = layer_norm(input_tensor)
output_bias = tf.get_variable(
'output_bias',
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
input_project = tf.matmul(input_tensor, projection_table, transpose_b=True)
logits = tf.matmul(input_project, embedding_table, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# [some_length, vocab_size]
log_probs = tf.nn.log_softmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
return None, None, tf.nn.softmax(logits, axis=-1), logits
# [some_length], no need to cast to tf.float32
label_ids = tf.reshape(label_ids, [-1])
# [some_length]
label_weights = tf.cast(tf.reshape(label_weights, [-1]), dtype=tf.float32)
# [some_length, vocab_size]
one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size)
# [some_length, 1]
per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1)
# ignore padding
numerator = tf.reduce_sum(label_weights * per_loss)
# the number of predicted items
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return loss, per_loss, log_probs, logits
def gather_indexes(input_tensor, positions):
"""Gather all the predicted tensor, input_tensor contains all the positions,
however, only masked positions are used for calculating the loss.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, width].
positions: save the relative positions of each sentence's labels.
Returns:
output_tensor: [some_length, width], where some_length refers to all the predicted labels
in the data batch.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
# create a vector which saves the initial positions for each batch
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
# get the absolute positions for the predicted labels, [batch_size * seq_length, 1]
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_input_tensor = tf.reshape(input_tensor,
[batch_size * seq_length, width])
# obtain the predicted items, [some_length, width]
output_tensor = tf.gather(flat_input_tensor, flat_positions)
return output_tensor
def main():
# tf.gfile.MakeDirs(FLAGS.output_dir)
Path(bert_config.model_dir).mkdir(exist_ok=True)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=bert_config.init_checkpoint,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
input_fn = functools.partial(train_input_fn,
path=bert_config.data_path,
batch_size=bert_config.batch_size,
repeat_num=bert_config.num_train_steps,
max_length = bert_config.max_length,
train_type=bert_config.train_type,
reverse=bert_config.reverse)
run_config = tf.contrib.tpu.RunConfig(
keep_checkpoint_max=1,
save_checkpoints_steps=1000,
model_dir=bert_config.model_dir)
estimator = tf.estimator.Estimator(model_fn, config=run_config)
estimator.train(input_fn)
# train_spec = tf.estimator.TrainSpec(input_fn=input_fn)
# eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000)
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# for evaluation, the repeat_num in input_fn has to be reset
# estimator.evaluate(input_fn)
def package_model(model_path, pb_path):
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=None,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
estimator = tf.estimator.Estimator(model_fn, model_path)
estimator.export_saved_model(pb_path, serving_input_receiver_fn)
if __name__ == '__main__':
main()
# package | metric_fn | identifier_name |
pre_train.py |
# # next_sentence_labels = features['next_sentence_labels']
# else:
masked_lm_positions = features['masked_lm_positions']
masked_lm_ids = features['masked_lm_ids']
masked_lm_weights = features['masked_lm_weights']
if bert_config.train_type == 'seq2seq':
|
elif bert_config.train_type == 'lm':
_info('Training language model task.')
# build model
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask)
# compute loss
loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,
model.get_sequence_output(),
model.embedding_table,
model.projection_table,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
mode)
if mode == tf.estimator.ModeKeys.PREDICT:
masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])
output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)
else:
if mode == tf.estimator.ModeKeys.TRAIN:
# restore from the checkpoint,
# tf.estimator automatically restores from the model typically,
# maybe this is here to restore some pre-trained parameters
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
_info('*** Trainable Variables ***')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
_info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))
train_op = optimization.create_optimizer(
loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)
# learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,
# tf.train.get_or_create_global_step(),
# num_train_steps,
# end_learning_rate=0.0,
# power=1.0,
# cycle=False)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
# clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
# train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())
output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)
def metric_fn(loss, label_ids, logits, is_real_example):
"""
Args:
loss: tf.float32.
label_ids: [b, s].
logits: [b, s, v].
"""
# [b * s, v]
logits = tf.reshape(logits, [-1, logits.shape[-1]])
# [b * s, 1]
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# [b * s]
label_ids = tf.reshape(label_ids, [-1])
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=loss)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, embedding_table, projection_table, positions,
label_ids, label_weights, mode):
"""Get the loss for the answer according to the mask.
Args:
bert_config: config for bert.
input_tensor: float Tensor of shape [batch_size, seq_length, width].
embedding_table: [vocab_size, embedding_size].
projection_table: [embedding_size, hidden_size].
positions: tf.int32, which saves the positions for answers.
label_ids: tf.int32, which are the true labels.
label_weights: tf.int32, which refers to the padding.
Returns:
loss: average word loss.
per_loss: per word loss.
log_probs: log probability.
"""
predicted_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope('cls/predictions'):
with tf.variable_scope('transform'):
input_tensor = tf.layers.dense(
predicted_tensor,
units=bert_config.hidden_size,
activation=gelu,
kernel_initializer=create_initializer(bert_config.initializer_range))
input_tensor = layer_norm(input_tensor)
output_bias = tf.get_variable(
'output_bias',
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
input_project = tf.matmul(input_tensor, projection_table, transpose_b=True)
logits = tf.matmul(input_project, embedding_table, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# [some_length, vocab_size]
log_probs = tf.nn.log_softmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
return None, None, tf.nn.softmax(logits, axis=-1), logits
# [some_length], no need to cast to tf.float32
label_ids = tf.reshape(label_ids, [-1])
# [some_length]
label_weights = tf.cast(tf.reshape(label_weights, [-1]), dtype=tf.float32)
# [some_length, vocab_size]
one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size)
# [some_length, 1]
per_loss = - tf.reduce_sum(log_probs * one_hot_labels, axis=-1)
# ignore padding
numerator = tf.reduce_sum(label_weights * per_loss)
# the number of predicted items
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return loss, per_loss, log_probs, logits
def gather_indexes(input_tensor, positions):
"""Gather all the predicted tensor, input_tensor contains all the positions,
however, only masked positions are used for calculating the loss.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, width].
positions: save the relative positions of each sentence's labels.
Returns:
output_tensor: [some_length, width], where some_length refers to all the predicted labels
in the data batch.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
# create a vector which saves the initial positions for each batch
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
# get the absolute positions for the predicted labels, [batch_size * seq_length, 1]
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_input_tensor = tf.reshape(input_tensor,
[batch_size * seq_length, width])
# obtain the predicted items, [some_length, width]
output_tensor = tf.gather(flat_input_tensor, flat_positions)
return output_tensor
def main():
# tf.gfile.MakeDirs(FLAGS.output_dir)
Path(bert_config.model_dir).mkdir(exist_ok=True)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=bert_config.init_checkpoint,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
input_fn = functools.partial(train_input_fn,
path=bert_config.data_path,
batch_size=bert_config.batch_size,
repeat_num=bert_config.num_train_steps,
max_length = bert_config.max_length,
train_type=bert_config.train_type,
reverse=bert_config.reverse)
run_config = tf.contrib.tpu.RunConfig(
keep_checkpoint_max=1,
save_checkpoints_steps=1000,
model_dir=bert_config.model_dir)
estimator = tf.estimator.Estimator(model_fn, config=run_config)
estimator.train(input_fn)
# train_spec = tf.estimator.TrainSpec(input_fn=input_fn)
# eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1000)
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# for evaluation, the repeat_num in input_fn has to be reset
# estimator.evaluate(input_fn)
def package_model(model_path, pb_path):
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=None,
learning_rate=bert_config.learning_rate,
num_train_steps=bert_config.num_train_steps)
estimator = tf.estimator.Estimator(model_fn, model_path)
estimator.export_saved_model(pb_path, serving_input_receiver_fn)
if __name__ == '__main__':
main()
# package | _info('Training seq2seq task.') | conditional_block |
layer.go | . discard this.
done()
r.blobCacheMu.Lock()
r.blobCache.Remove(name)
r.blobCacheMu.Unlock()
}
httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config)
if err != nil {
return nil, fmt.Errorf("failed to create http cache: %w", err)
}
defer func() {
if retErr != nil {
httpCache.Close()
}
}()
// Resolve the blob and cache the result.
b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache)
if err != nil {
return nil, fmt.Errorf("failed to resolve the source: %w", err)
}
r.blobCacheMu.Lock()
cachedB, done, added := r.blobCache.Add(name, b)
r.blobCacheMu.Unlock()
if !added {
b.Close() // blob already exists in the cache. discard this.
}
return &blobRef{cachedB.(remote.Blob), done}, nil
}
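// Editor's illustrative aside, not part of the original file: blobCache.Add
// above follows a release-callback pattern, where the caller keeps the cached
// value alive until it invokes done(). A hypothetical, simplified sketch of
// that contract (the real cache also reference-counts entries):
type exampleRefCache struct {
	mu      sync.Mutex
	entries map[string]interface{}
}

func (c *exampleRefCache) add(key string, v interface{}) (value interface{}, done func(), added bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if cur, ok := c.entries[key]; ok {
		return cur, func() {}, false // existing entry wins; caller discards v
	}
	c.entries[key] = v
	return v, func() {
		c.mu.Lock()
		delete(c.entries, key)
		c.mu.Unlock()
	}, true
}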
func newLayer(
resolver *Resolver,
desc ocispec.Descriptor,
blob *blobRef,
vr *reader.VerifiableReader,
) *layer {
return &layer{
resolver: resolver,
desc: desc,
blob: blob,
verifiableReader: vr,
prefetchWaiter: newWaiter(),
}
}
type layer struct {
resolver *Resolver
desc ocispec.Descriptor
blob *blobRef
verifiableReader *reader.VerifiableReader
prefetchWaiter *waiter
prefetchSize int64
prefetchSizeMu sync.Mutex
r reader.Reader
closed bool
closedMu sync.Mutex
prefetchOnce sync.Once
backgroundFetchOnce sync.Once
}
func (l *layer) Info() Info {
var readTime time.Time
if l.r != nil {
readTime = l.r.LastOnDemandReadTime()
}
return Info{
Digest: l.desc.Digest,
Size: l.blob.Size(),
FetchedSize: l.blob.FetchedSize(),
PrefetchSize: l.prefetchedSize(),
ReadTime: readTime,
}
}
func (l *layer) prefetchedSize() int64 {
l.prefetchSizeMu.Lock()
sz := l.prefetchSize
l.prefetchSizeMu.Unlock()
return sz
}
func (l *layer) Check() error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.blob.Check()
}
func (l *layer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.blob.Refresh(ctx, hosts, refspec, desc)
}
func (l *layer) Verify(tocDigest digest.Digest) (err error) {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
if l.r != nil {
return nil
}
l.r, err = l.verifiableReader.VerifyTOC(tocDigest)
return
}
func (l *layer) SkipVerify() {
if l.r != nil {
return
}
l.r = l.verifiableReader.SkipVerify()
}
func (l *layer) Prefetch(prefetchSize int64) (err error) {
l.prefetchOnce.Do(func() {
ctx := context.Background()
l.resolver.backgroundTaskManager.DoPrioritizedTask()
defer l.resolver.backgroundTaskManager.DonePrioritizedTask()
err = l.prefetch(ctx, prefetchSize)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest)
return
}
log.G(ctx).Debug("completed to prefetch")
})
return
}
func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error {
defer l.prefetchWaiter.done() // Notify the completion
// Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later)
start := time.Now()
defer func() {
commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize())
}()
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
rootID := l.verifiableReader.Metadata().RootID()
if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil {
// do not prefetch this layer
return nil
} else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil {
offset, err := l.verifiableReader.Metadata().GetOffset(id)
if err != nil {
return fmt.Errorf("failed to get offset of prefetch landmark: %w", err)
}
// override the prefetch size with optimized value
prefetchSize = offset
} else if prefetchSize > l.blob.Size() {
// adjust prefetch size not to exceed the whole layer size
prefetchSize = l.blob.Size()
}
// Fetch the target range
downloadStart := time.Now()
err := l.blob.Cache(0, prefetchSize)
commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data
if err != nil {
return fmt.Errorf("failed to prefetch layer: %w", err)
}
// Set prefetch size for metrics after prefetch completed
l.prefetchSizeMu.Lock()
l.prefetchSize = prefetchSize
l.prefetchSizeMu.Unlock()
// Cache uncompressed contents of the prefetched range
decompressStart := time.Now()
err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool {
return offset < prefetchSize // Cache only prefetch target
}))
commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress prefetch data
if err != nil {
return fmt.Errorf("failed to cache prefetched layer: %w", err)
}
return nil
}
func (l *layer) WaitForPrefetchCompletion() error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.prefetchWaiter.wait(l.resolver.prefetchTimeout)
}
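// Editor's illustrative aside, not part of the original file: a usage sketch
// showing how prefetch is typically started without blocking while a reader
// waits for its completion. The prefetch size here is a placeholder.
func prefetchExample(l *layer) error {
	go func() {
		// Prefetch is guarded by sync.Once, so a duplicate call is harmless.
		_ = l.Prefetch(10 << 20) // errors are also logged inside Prefetch
	}()
	// Blocks until the prefetched range is cached or the resolver timeout fires.
	return l.WaitForPrefetchCompletion()
}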
func (l *layer) BackgroundFetch() (err error) {
l.backgroundFetchOnce.Do(func() {
ctx := context.Background()
err = l.backgroundFetch(ctx)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest)
return
}
log.G(ctx).Debug("completed to fetch all layer data in background")
})
return
}
func (l *layer) backgroundFetch(ctx context.Context) error {
defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now())
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) {
l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) {
// Measuring the time to download background fetch data (in milliseconds)
defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data
retN, retErr = l.blob.ReadAt(
p,
offset,
remote.WithContext(ctx), // Make cancellable
remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache
)
}, 120*time.Second)
return
}), 0, l.blob.Size())
defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress background fetch data (in milliseconds)
return l.verifiableReader.Cache(
reader.WithReader(br), // Read contents in background
reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache
)
}
func (l *layerRef) Done() {
l.done()
}
func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) {
if l.isClosed() {
return nil, fmt.Errorf("layer is already closed")
}
if l.r == nil {
return nil, fmt.Errorf("layer hasn't been verified yet")
}
return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType)
}
func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) {
return l.blob.ReadAt(p, offset, opts...)
}
func (l *layer) close() error {
l.closedMu.Lock()
defer l.closedMu.Unlock()
if l.closed {
return nil
}
l.closed = true
defer l.blob.done() // Close reader first, then close the blob
l.verifiableReader.Close()
if l.r != nil {
return l.r.Close()
}
return nil
}
func (l *layer) | isClosed | identifier_name |
|
layer.go | *reader.VerifiableReader,
) *layer {
return &layer{
resolver: resolver,
desc: desc,
blob: blob,
verifiableReader: vr,
prefetchWaiter: newWaiter(),
}
}
type layer struct {
resolver *Resolver
desc ocispec.Descriptor
blob *blobRef
verifiableReader *reader.VerifiableReader
prefetchWaiter *waiter
prefetchSize int64
prefetchSizeMu sync.Mutex
r reader.Reader
closed bool
closedMu sync.Mutex
prefetchOnce sync.Once
backgroundFetchOnce sync.Once
}
func (l *layer) Info() Info {
var readTime time.Time
if l.r != nil {
readTime = l.r.LastOnDemandReadTime()
}
return Info{
Digest: l.desc.Digest,
Size: l.blob.Size(),
FetchedSize: l.blob.FetchedSize(),
PrefetchSize: l.prefetchedSize(),
ReadTime: readTime,
}
}
func (l *layer) prefetchedSize() int64 {
l.prefetchSizeMu.Lock()
sz := l.prefetchSize
l.prefetchSizeMu.Unlock()
return sz
}
func (l *layer) Check() error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.blob.Check()
}
func (l *layer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.blob.Refresh(ctx, hosts, refspec, desc)
}
func (l *layer) Verify(tocDigest digest.Digest) (err error) {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
if l.r != nil {
return nil
}
l.r, err = l.verifiableReader.VerifyTOC(tocDigest)
return
}
func (l *layer) SkipVerify() {
if l.r != nil {
return
}
l.r = l.verifiableReader.SkipVerify()
}
func (l *layer) Prefetch(prefetchSize int64) (err error) {
l.prefetchOnce.Do(func() {
ctx := context.Background()
l.resolver.backgroundTaskManager.DoPrioritizedTask()
defer l.resolver.backgroundTaskManager.DonePrioritizedTask()
err = l.prefetch(ctx, prefetchSize)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to prefetch layer=%v", l.desc.Digest)
return
}
log.G(ctx).Debug("completed to prefetch")
})
return
}
func (l *layer) prefetch(ctx context.Context, prefetchSize int64) error {
defer l.prefetchWaiter.done() // Notify the completion
// Measuring the total time to complete prefetch (use defer func() because l.Info().PrefetchSize is set later)
start := time.Now()
defer func() {
commonmetrics.WriteLatencyWithBytesLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchTotal, start, commonmetrics.PrefetchSize, l.prefetchedSize())
}()
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
rootID := l.verifiableReader.Metadata().RootID()
if _, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.NoPrefetchLandmark); err == nil {
// do not prefetch this layer
return nil
} else if id, _, err := l.verifiableReader.Metadata().GetChild(rootID, estargz.PrefetchLandmark); err == nil {
offset, err := l.verifiableReader.Metadata().GetOffset(id)
if err != nil {
return fmt.Errorf("failed to get offset of prefetch landmark: %w", err)
}
// override the prefetch size with optimized value
prefetchSize = offset
} else if prefetchSize > l.blob.Size() {
// adjust prefetch size not to exceed the whole layer size
prefetchSize = l.blob.Size()
}
// Fetch the target range
downloadStart := time.Now()
err := l.blob.Cache(0, prefetchSize)
commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDownload, downloadStart) // time to download prefetch data
if err != nil {
return fmt.Errorf("failed to prefetch layer: %w", err)
}
// Set prefetch size for metrics after prefetch completed
l.prefetchSizeMu.Lock()
l.prefetchSize = prefetchSize
l.prefetchSizeMu.Unlock()
// Cache uncompressed contents of the prefetched range
decompressStart := time.Now()
err = l.verifiableReader.Cache(reader.WithFilter(func(offset int64) bool {
return offset < prefetchSize // Cache only prefetch target
}))
commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.PrefetchDecompress, decompressStart) // time to decompress prefetch data
if err != nil {
return fmt.Errorf("failed to cache prefetched layer: %w", err)
}
return nil
}
func (l *layer) WaitForPrefetchCompletion() error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.prefetchWaiter.wait(l.resolver.prefetchTimeout)
}
func (l *layer) BackgroundFetch() (err error) {
l.backgroundFetchOnce.Do(func() {
ctx := context.Background()
err = l.backgroundFetch(ctx)
if err != nil {
log.G(ctx).WithError(err).Warnf("failed to fetch whole layer=%v", l.desc.Digest)
return
}
log.G(ctx).Debug("completed to fetch all layer data in background")
})
return
}
func (l *layer) backgroundFetch(ctx context.Context) error {
defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchTotal, time.Now())
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
br := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (retN int, retErr error) {
l.resolver.backgroundTaskManager.InvokeBackgroundTask(func(ctx context.Context) {
// Measuring the time to download background fetch data (in milliseconds)
defer commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.BackgroundFetchDownload, l.Info().Digest, time.Now()) // time to download background fetch data
retN, retErr = l.blob.ReadAt(
p,
offset,
remote.WithContext(ctx), // Make cancellable
remote.WithCacheOpts(cache.Direct()), // Do not pollute mem cache
)
}, 120*time.Second)
return
}), 0, l.blob.Size())
defer commonmetrics.WriteLatencyLogValue(ctx, l.desc.Digest, commonmetrics.BackgroundFetchDecompress, time.Now()) // time to decompress background fetch data (in milliseconds)
return l.verifiableReader.Cache(
reader.WithReader(br), // Read contents in background
reader.WithCacheOpts(cache.Direct()), // Do not pollute mem cache
)
}
func (l *layerRef) Done() {
l.done()
}
func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) {
if l.isClosed() {
return nil, fmt.Errorf("layer is already closed")
}
if l.r == nil {
return nil, fmt.Errorf("layer hasn't been verified yet")
}
return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType)
}
func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) {
return l.blob.ReadAt(p, offset, opts...)
}
func (l *layer) close() error {
l.closedMu.Lock()
defer l.closedMu.Unlock()
if l.closed {
return nil
}
l.closed = true
defer l.blob.done() // Close reader first, then close the blob
l.verifiableReader.Close()
if l.r != nil {
return l.r.Close()
}
return nil
}
func (l *layer) isClosed() bool {
l.closedMu.Lock()
closed := l.closed
l.closedMu.Unlock()
return closed
}
// blobRef is a reference to the blob in the cache. Calling `done` decreases the reference counter
// of this blob in the underlying cache. When nobody refers to the blob in the cache, resources bound
// to this blob will be discarded.
type blobRef struct {
remote.Blob
done func()
}
// layerRef is a reference to the layer in the cache. Calling `Done` or `done` decreases the
// reference counter of this blob in the underlying cache. When nobody refers to the layer in the
// cache, resources bound to this layer will be discarded.
type layerRef struct {
*layer
done func()
}
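// Illustrative usage sketch (not from the original source): a resolved layer is
// typically driven through the lifecycle below by its caller. The ctx, hosts,
// refspec, desc, tocDigest, prefetchSize and baseInode values are assumed to be
// supplied by that caller.
//
//	l, err := r.Resolve(ctx, hosts, refspec, desc)
//	if err != nil {
//		return err
//	}
//	defer l.Done()                               // release the cache reference
//	if err := l.Verify(tocDigest); err != nil {  // or l.SkipVerify()
//		return err
//	}
//	_ = l.Prefetch(prefetchSize)                 // guarded internally by sync.Once
//	root, err := l.RootNode(baseInode)           // FUSE root; requires Verify/SkipVerify first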
func newWaiter() *waiter {
return &waiter{
completionCond: sync.NewCond(&sync.Mutex{}),
}
}
type waiter struct {
isDone bool
isDoneMu sync.Mutex
completionCond *sync.Cond
}
func (w *waiter) done() | {
w.isDoneMu.Lock()
w.isDone = true
w.isDoneMu.Unlock()
w.completionCond.Broadcast()
} | identifier_body |
|
layer.go | FdEntry := dcc.MaxCacheFds
if maxFdEntry == 0 {
maxFdEntry = defaultMaxCacheFds
}
bufPool := &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry)
dCache.OnEvicted = func(key string, value interface{}) {
value.(*bytes.Buffer).Reset()
bufPool.Put(value)
}
fCache.OnEvicted = func(key string, value interface{}) {
value.(*os.File).Close()
}
// create a cache on a unique directory
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
cachePath, err := os.MkdirTemp(root, "")
if err != nil {
return nil, fmt.Errorf("failed to initialize directory cache: %w", err)
}
return cache.NewDirectoryCache(
cachePath,
cache.DirectoryCacheConfig{
SyncAdd: dcc.SyncAdd,
DataCache: dCache,
FdCache: fCache,
BufPool: bufPool,
Direct: dcc.Direct,
},
)
}
// Resolve resolves a layer based on the passed layer blob information.
func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) {
name := refspec.String() + "/" + desc.Digest.String()
// Wait if resolving this layer is already in progress. The result
// can hopefully be obtained from the cache.
r.resolveLock.Lock(name)
defer r.resolveLock.Unlock(name)
ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name))
// First, try to retrieve this layer from the underlying cache.
r.layerCacheMu.Lock()
c, done, ok := r.layerCache.Get(name)
r.layerCacheMu.Unlock()
if ok {
if l := c.(*layer); l.Check() == nil {
log.G(ctx).Debugf("hit layer cache %q", name)
return &layerRef{l, done}, nil
}
// Cached layer is invalid
done()
r.layerCacheMu.Lock()
r.layerCache.Remove(name)
r.layerCacheMu.Unlock()
}
log.G(ctx).Debugf("resolving")
// Resolve the blob.
blobR, err := r.resolveBlob(ctx, hosts, refspec, desc)
if err != nil {
return nil, fmt.Errorf("failed to resolve the blob: %w", err)
}
defer func() {
if retErr != nil {
blobR.done()
}
}()
fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config)
if err != nil {
return nil, fmt.Errorf("failed to create fs cache: %w", err)
}
defer func() {
if retErr != nil {
fsCache.Close()
}
}()
// Get a reader for stargz archive.
// Each file's read operation is a prioritized task and all background tasks
// are stopped during its execution, so the read is not disturbed by
// network (NW) traffic from background tasks.
sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) {
r.backgroundTaskManager.DoPrioritizedTask()
defer r.backgroundTaskManager.DonePrioritizedTask()
return blobR.ReadAt(p, offset)
}), 0, blobR.Size())
// define telemetry hooks to measure latency metrics inside estargz package
telemetry := metadata.Telemetry{
GetFooterLatency: func(start time.Time) {
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start)
},
GetTocLatency: func(start time.Time) {
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start)
},
DeserializeTocLatency: func(start time.Time) {
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start)
},
}
additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)}
if r.additionalDecompressors != nil {
additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...)
}
meta, err := r.metadataStore(sr,
append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...)
if err != nil {
return nil, err
}
vr, err := reader.NewReader(meta, fsCache, desc.Digest)
if err != nil {
return nil, fmt.Errorf("failed to read layer: %w", err)
}
// Combine layer information together and cache it.
l := newLayer(r, desc, blobR, vr)
r.layerCacheMu.Lock()
cachedL, done2, added := r.layerCache.Add(name, l)
r.layerCacheMu.Unlock()
if !added {
l.close() // layer already exists in the cache. discard this.
}
log.G(ctx).Debugf("resolved")
return &layerRef{cachedL.(*layer), done2}, nil
}
// resolveBlob resolves a blob based on the passed layer blob information.
func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) {
name := refspec.String() + "/" + desc.Digest.String()
// Try to retrieve the blob from the underlying cache.
r.blobCacheMu.Lock()
c, done, ok := r.blobCache.Get(name)
r.blobCacheMu.Unlock()
if ok {
if blob := c.(remote.Blob); blob.Check() == nil {
return &blobRef{blob, done}, nil
}
// invalid blob. discard this.
done()
r.blobCacheMu.Lock()
r.blobCache.Remove(name)
r.blobCacheMu.Unlock()
}
httpCache, err := newCache(filepath.Join(r.rootDir, "httpcache"), r.config.HTTPCacheType, r.config)
if err != nil {
return nil, fmt.Errorf("failed to create http cache: %w", err)
}
defer func() {
if retErr != nil {
httpCache.Close()
}
}()
// Resolve the blob and cache the result.
b, err := r.resolver.Resolve(ctx, hosts, refspec, desc, httpCache)
if err != nil {
return nil, fmt.Errorf("failed to resolve the source: %w", err)
}
r.blobCacheMu.Lock()
cachedB, done, added := r.blobCache.Add(name, b)
r.blobCacheMu.Unlock()
if !added {
b.Close() // blob already exists in the cache. discard this.
}
return &blobRef{cachedB.(remote.Blob), done}, nil
}
func newLayer(
resolver *Resolver,
desc ocispec.Descriptor,
blob *blobRef,
vr *reader.VerifiableReader,
) *layer {
return &layer{
resolver: resolver,
desc: desc,
blob: blob,
verifiableReader: vr,
prefetchWaiter: newWaiter(),
}
}
type layer struct {
resolver *Resolver
desc ocispec.Descriptor
blob *blobRef
verifiableReader *reader.VerifiableReader
prefetchWaiter *waiter
prefetchSize int64
prefetchSizeMu sync.Mutex
r reader.Reader
closed bool
closedMu sync.Mutex
prefetchOnce sync.Once
backgroundFetchOnce sync.Once
}
func (l *layer) Info() Info {
var readTime time.Time
if l.r != nil {
readTime = l.r.LastOnDemandReadTime()
}
return Info{
Digest: l.desc.Digest,
Size: l.blob.Size(),
FetchedSize: l.blob.FetchedSize(),
PrefetchSize: l.prefetchedSize(),
ReadTime: readTime,
}
}
func (l *layer) prefetchedSize() int64 {
l.prefetchSizeMu.Lock()
sz := l.prefetchSize
l.prefetchSizeMu.Unlock()
return sz
}
func (l *layer) Check() error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.blob.Check()
}
func (l *layer) Refresh(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) error {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
return l.blob.Refresh(ctx, hosts, refspec, desc)
}
func (l *layer) Verify(tocDigest digest.Digest) (err error) {
if l.isClosed() {
return fmt.Errorf("layer is already closed")
}
if l.r != nil {
return nil
}
l.r, err = l.verifiableReader.VerifyTOC(tocDigest)
return
}
func (l *layer) SkipVerify() {
if l.r != nil | {
return
} | conditional_block |
|
layer.go | .Spec, ocispec.Descriptor) []metadata.Decompressor
}
// NewResolver returns a new layer resolver.
func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) {
resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second
if resolveResultEntryTTL == 0 {
resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second
}
prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second
if prefetchTimeout == 0 {
prefetchTimeout = defaultPrefetchTimeoutSec * time.Second
}
// layerCache caches resolved layers for future use. This is useful in a use-case where
// the filesystem resolves and caches all layers in an image (not only the queried one) in parallel,
// before they are actually queried.
layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL)
layerCache.OnEvicted = func(key string, value interface{}) {
if err := value.(*layer).close(); err != nil {
logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer")
return
}
logrus.WithField("key", key).Debugf("cleaned up layer")
}
// blobCache caches resolved blobs for future use. This is especially useful when a layer
// isn't eStargz/stargz (the *layer object won't be created/cached in this case).
blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL)
blobCache.OnEvicted = func(key string, value interface{}) {
if err := value.(remote.Blob).Close(); err != nil {
logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob")
return
}
logrus.WithField("key", key).Debugf("cleaned up blob")
}
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
return &Resolver{
rootDir: root,
resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers),
layerCache: layerCache,
blobCache: blobCache,
prefetchTimeout: prefetchTimeout,
backgroundTaskManager: backgroundTaskManager,
config: cfg,
resolveLock: new(namedmutex.NamedMutex),
metadataStore: metadataStore,
overlayOpaqueType: overlayOpaqueType,
additionalDecompressors: additionalDecompressors,
}, nil
}
func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache, error) {
if cacheType == memoryCacheType {
return cache.NewMemoryCache(), nil
}
dcc := cfg.DirectoryCacheConfig
maxDataEntry := dcc.MaxLRUCacheEntry
if maxDataEntry == 0 {
maxDataEntry = defaultMaxLRUCacheEntry
}
maxFdEntry := dcc.MaxCacheFds
if maxFdEntry == 0 {
maxFdEntry = defaultMaxCacheFds
}
bufPool := &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry)
dCache.OnEvicted = func(key string, value interface{}) {
value.(*bytes.Buffer).Reset()
bufPool.Put(value)
}
fCache.OnEvicted = func(key string, value interface{}) {
value.(*os.File).Close()
}
// create a cache on a unique directory
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
cachePath, err := os.MkdirTemp(root, "")
if err != nil {
return nil, fmt.Errorf("failed to initialize directory cache: %w", err)
}
return cache.NewDirectoryCache(
cachePath,
cache.DirectoryCacheConfig{
SyncAdd: dcc.SyncAdd,
DataCache: dCache,
FdCache: fCache,
BufPool: bufPool,
Direct: dcc.Direct,
},
)
}
// Resolve resolves a layer based on the passed layer blob information.
func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor, esgzOpts ...metadata.Option) (_ Layer, retErr error) {
name := refspec.String() + "/" + desc.Digest.String()
// Wait if resolving this layer is already in progress. The result
// can hopefully be obtained from the cache.
r.resolveLock.Lock(name)
defer r.resolveLock.Unlock(name)
ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name))
// First, try to retrieve this layer from the underlying cache.
r.layerCacheMu.Lock()
c, done, ok := r.layerCache.Get(name)
r.layerCacheMu.Unlock()
if ok {
if l := c.(*layer); l.Check() == nil {
log.G(ctx).Debugf("hit layer cache %q", name)
return &layerRef{l, done}, nil
}
// Cached layer is invalid
done()
r.layerCacheMu.Lock()
r.layerCache.Remove(name) | }
log.G(ctx).Debugf("resolving")
// Resolve the blob.
blobR, err := r.resolveBlob(ctx, hosts, refspec, desc)
if err != nil {
return nil, fmt.Errorf("failed to resolve the blob: %w", err)
}
defer func() {
if retErr != nil {
blobR.done()
}
}()
fsCache, err := newCache(filepath.Join(r.rootDir, "fscache"), r.config.FSCacheType, r.config)
if err != nil {
return nil, fmt.Errorf("failed to create fs cache: %w", err)
}
defer func() {
if retErr != nil {
fsCache.Close()
}
}()
// Get a reader for stargz archive.
// Each file's read operation is a prioritized task and all background tasks
// are stopped during its execution, so the read is not disturbed by
// network (NW) traffic from background tasks.
sr := io.NewSectionReader(readerAtFunc(func(p []byte, offset int64) (n int, err error) {
r.backgroundTaskManager.DoPrioritizedTask()
defer r.backgroundTaskManager.DonePrioritizedTask()
return blobR.ReadAt(p, offset)
}), 0, blobR.Size())
// define telemetry hooks to measure latency metrics inside estargz package
telemetry := metadata.Telemetry{
GetFooterLatency: func(start time.Time) {
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzFooterGet, desc.Digest, start)
},
GetTocLatency: func(start time.Time) {
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzTocGet, desc.Digest, start)
},
DeserializeTocLatency: func(start time.Time) {
commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start)
},
}
additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)}
if r.additionalDecompressors != nil {
additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...)
}
meta, err := r.metadataStore(sr,
append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...)
if err != nil {
return nil, err
}
vr, err := reader.NewReader(meta, fsCache, desc.Digest)
if err != nil {
return nil, fmt.Errorf("failed to read layer: %w", err)
}
// Combine layer information together and cache it.
l := newLayer(r, desc, blobR, vr)
r.layerCacheMu.Lock()
cachedL, done2, added := r.layerCache.Add(name, l)
r.layerCacheMu.Unlock()
if !added {
l.close() // layer already exists in the cache. discard this.
}
log.G(ctx).Debugf("resolved")
return &layerRef{cachedL.(*layer), done2}, nil
}
// resolveBlob resolves a blob based on the passed layer blob information.
func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) {
name := refspec.String() + "/" + desc.Digest.String()
// Try to retrieve the blob from the underlying cache.
r.blobCacheMu.Lock()
c, done, ok := r.blobCache.Get(name)
r.blobCacheMu.Unlock()
if ok {
if blob := c.(remote.Blob); blob.Check() == nil {
return &blobRef{blob, done}, nil
}
// invalid blob. discard this.
done()
r.blobCacheMu.Lock()
r | r.layerCacheMu.Unlock() | random_line_split |
app.py | Exclude beers user doesn't like at all if known beers can be recommended
dislike_beers = melt_df[melt_df['rating'] < 1]['product'].to_list()
for dislike_beer in dislike_beers:
if dislike_beer in recommendable_beers:
recommendable_beers.remove(dislike_beer)
st.text("")
st.text("")
st.text("")
if st.button('Gerar recomendações'):
model = load_model('data/recommending_system')
if len(recommendable_beers) == 0:
st.error('Não temos nenhuma cerveja para te recomendar :/')
else:
with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'):
sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast
recommendations = model.recommend(
users=[-1],
k=3,
items=recommendable_beers,
new_observation_data=SFrame(new_observation_data),
exclude_known=exclude_known,
).to_dataframe()
# st.dataframe(recommendations)
if recommendations.empty and exclude_known:
st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?')
else:
st.success('Pronto! Selecione no menu à esquerda a página Recomendações.')
sleep(3)
state.recommendations, state.paladar = recommendations, df_paladar
def display_sugestoes(state):
st.title(':beers: CERVEJAS RECOMENDADAS')
st.markdown('''
Estas são as cervejas artesanais brasileiras **mais recomendadas para você**.
Ao final, você poderá enviar a lista de cervejas para o seu e-mail.
''')
recommendations, df_paladar = state.recommendations, state.paladar
# st.dataframe(df_paladar)
# st.dataframe(recommendations)
if not isinstance(recommendations, pd.DataFrame):
st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.')
else:
rename_beer_styles = {
'Cerveja Blonde': 'Blonde Ale',
'Cerveja Trigo': 'Weiss (Trigo)',
'Cerveja APA': 'American Pale Ale',
'Cerveja IPA': 'India Pale Ale',
'Cerveja Session IPA': 'Session IPA',
'Cerveja NEIPA': 'New England IPA',
'Cerveja Porter': 'Porter/Stout',
'Cerveja Malzbier': 'Dunkel/Malzbier',
'Cerveja Witbier': 'Witbier',
'Cerveja Sour': 'Sour/Fruit',
'Cerveja RIS': 'Russian Imperial Stout',
'Cerveja Lambic': 'Lambic'
}
recommendations.replace({'product': rename_beer_styles}, inplace=True)
with st.spinner('Buscando cervejas...'):
df_cervejas = get_beer_list()
recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style')
recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False])
# st.dataframe(recommended_labels)
origins = recommended_labels['origin_state'].unique().tolist()
origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins)
filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)]
max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3)
df_style_1 = filtered_labels[filtered_labels['rank'] == 1]
df_style_2 = filtered_labels[filtered_labels['rank'] == 2]
df_style_3 = filtered_labels[filtered_labels['rank'] == 3]
markdown_list = []
image_list = []
for df_style in [df_style_1, df_style_2, df_style_3]:
if not df_style.empty:
df_style.reset_index(drop=True, inplace=True)
style_name = df_style['terabeer_style'][0]
style_rank = df_style['rank'][0]
style_score = df_style['score'][0]
style_description = df_style['style_description'][0]
style_harmonization = df_style['harmonization'][0]
if style_harmonization:
harmonization_line = f'<br><br> <b>Harmoniza bem com</b>: {style_harmonization}'
else:
harmonization_line = ''
style_markdown = f"""
<div>
<br>
<h2>
Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você)
</h2>
<br>
<p>
<b>Descrição</b>: {style_description} {harmonization_line}
</p>
<br>
</div>
"""
st.markdown(style_markdown, unsafe_allow_html=True)
markdown_list.append(style_markdown)
for index, row in df_style.iloc[0:max_beers, :].iterrows():
beer = row['name']
brewery = row['brand']
abv = row['abv']
ibu = row['ibu']
avg_rating = row['ratings_avg']
count_ratings = int(row['ratings_count'])
figure = row['figure']
ratings_source = row['ratings_source']
ratings_url = row['ratings_url']
origin_state = row['origin_state']
offer_url = row['offer_url']
discount_coupon = row['discount_coupon']
column1, column2 = st.beta_columns((1, 4))
with column1: # Column with beer labels
try:
st.image(f'fig/{figure}', use_column_width=True)
image_list.append(f'fig/{figure}')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
except FileNotFoundError:
st.image('fig/placeholder-image.jpg', use_column_width=True)
image_list.append('fig/placeholder-image.jpg')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
with column2: # Column with beer characteristics
ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>'
ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})'
ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível'
discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else ''
offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}'
beer_markdown = f"""
<div>
<h3>{beer} - {brewery}</h3>
<p>
<b>Origem</b>: {origin_state}<br>
<b>Nota média</b>: {ratings_line}<br>
<b>ABV</b>: {abv}% álcool <br>
<b>IBU</b>: {ibu_line} <br>
{offer_line}
</p>
</div>
"""
st.markdown(beer_markdown, unsafe_allow_html=True)
markdown_list.append(beer_markdown)
st.text("")
st.text("")
st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:")
email = st.text_input('')
if email:
st.markdown("### Qual seu nome?")
name = st.text_input(' ')
accept_beer_offers = st.checkbox(
'Aceito receber novidades do TeraBeer.',
True
)
allow_data_usage = st.checkbox(
'Permito que utilizem minhas respostas para melhorar recomendações futuras.',
True
)
st.text("")
if st.button('Enviar recomendações por email'):
with st.spinner(text='Enviando...'):
| send_mail(email, name, markdown_list, image_list)
st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.')
if accept_beer_offers or allow_data_usage: # Try to send answers to database
db = DBFunctions()
try:
db.send_answers_to_db(
email=email,
name=name,
recommendations=recommendations,
df_paladar=df_paladar,
accept_beer_offers=accept_beer_offers,
allow_data_usage=allow_data_usage,
)
except KeyError:
pass
class _SessionState:
| conditional_block |
|
app.py | {
'Cerveja Blonde': 'Blonde Ale',
'Cerveja Trigo': 'Weiss (Trigo)',
'Cerveja APA': 'American Pale Ale',
'Cerveja IPA': 'India Pale Ale',
'Cerveja Session IPA': 'Session IPA',
'Cerveja NEIPA': 'New England IPA',
'Cerveja Porter': 'Porter/Stout',
'Cerveja Malzbier': 'Dunkel/Malzbier',
'Cerveja Witbier': 'Witbier',
'Cerveja Sour': 'Sour/Fruit',
'Cerveja RIS': 'Russian Imperial Stout',
'Cerveja Lambic': 'Lambic'
}
recommendations.replace({'product': rename_beer_styles}, inplace=True)
with st.spinner('Buscando cervejas...'):
df_cervejas = get_beer_list()
recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style')
recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False])
# st.dataframe(recommended_labels)
origins = recommended_labels['origin_state'].unique().tolist()
origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins)
filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)]
max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3)
df_style_1 = filtered_labels[filtered_labels['rank'] == 1]
df_style_2 = filtered_labels[filtered_labels['rank'] == 2]
df_style_3 = filtered_labels[filtered_labels['rank'] == 3]
markdown_list = []
image_list = []
for df_style in [df_style_1, df_style_2, df_style_3]:
if not df_style.empty:
df_style.reset_index(drop=True, inplace=True)
style_name = df_style['terabeer_style'][0]
style_rank = df_style['rank'][0]
style_score = df_style['score'][0]
style_description = df_style['style_description'][0]
style_harmonization = df_style['harmonization'][0]
if style_harmonization:
harmonization_line = f'<br><br> <b>Harmoniza bem com</b>: {style_harmonization}'
else:
harmonization_line = ''
style_markdown = f"""
<div>
<br>
<h2>
Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você)
</h2>
<br>
<p>
<b>Descrição</b>: {style_description} {harmonization_line}
</p>
<br>
</div>
"""
st.markdown(style_markdown, unsafe_allow_html=True)
markdown_list.append(style_markdown)
for index, row in df_style.iloc[0:max_beers, :].iterrows():
beer = row['name']
brewery = row['brand']
abv = row['abv']
ibu = row['ibu']
avg_rating = row['ratings_avg']
count_ratings = int(row['ratings_count'])
figure = row['figure']
ratings_source = row['ratings_source']
ratings_url = row['ratings_url']
origin_state = row['origin_state']
offer_url = row['offer_url']
discount_coupon = row['discount_coupon']
column1, column2 = st.beta_columns((1, 4))
with column1: # Column with beer labels
try:
st.image(f'fig/{figure}', use_column_width=True)
image_list.append(f'fig/{figure}')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
except FileNotFoundError:
st.image('fig/placeholder-image.jpg', use_column_width=True)
image_list.append('fig/placeholder-image.jpg')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
with column2: # Column with beer characteristics
ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>'
ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})'
ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível'
discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else ''
offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}'
beer_markdown = f"""
<div>
<h3>{beer} - {brewery}</h3>
<p>
<b>Origem</b>: {origin_state}<br>
<b>Nota média</b>: {ratings_line}<br>
<b>ABV</b>: {abv}% álcool <br>
<b>IBU</b>: {ibu_line} <br>
{offer_line}
</p>
</div>
"""
st.markdown(beer_markdown, unsafe_allow_html=True)
markdown_list.append(beer_markdown)
st.text("")
st.text("")
st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:")
email = st.text_input('')
if email:
st.markdown("### Qual seu nome?")
name = st.text_input(' ')
accept_beer_offers = st.checkbox(
'Aceito receber novidades do TeraBeer.',
True
)
allow_data_usage = st.checkbox(
'Permito que utilizem minhas respostas para melhorar recomendações futuras.',
True
)
st.text("")
if st.button('Enviar recomendações por email'):
with st.spinner(text='Enviando...'):
send_mail(email, name, markdown_list, image_list)
st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.')
if accept_beer_offers or allow_data_usage: # Try to send answers to database
db = DBFunctions()
try:
db.send_answers_to_db(
email=email,
name=name,
recommendations=recommendations,
df_paladar=df_paladar,
accept_beer_offers=accept_beer_offers,
allow_data_usage=allow_data_usage,
)
except KeyError:
pass
class _SessionState:
def __init__(self, session, hash_funcs):
"""Initialize SessionState instance."""
self.__dict__["_state"] = {
"data": {},
"hash": None,
"hasher": _CodeHasher(hash_funcs),
"is_rerun": False,
"session": session,
}
def __call__(self, **kwargs):
"""Initialize state data once."""
for item, value in kwargs.items():
if item not in self._state["data"]:
self._state["data"][item] = value
def __getitem__(self, item):
"""Return a saved state value, None if item is undefined."""
return self._state["data"].get(item, None)
def __getattr__(self, item):
"""Return a saved state value, None if item is undefined."""
return self._state["data"].get(item, None)
def __setitem__(self, item, value):
"""Set state value."""
self._state["data"][item] = value
def __setattr__(self, item, value):
"""Set state value."""
self._state["data"][item] = value
def clear(self):
"""Clear session state and request a rerun."""
self._state["data"].clear()
self._state["session"].request_rerun()
def sync(self):
"""Rerun the app with all state values up to date from the | beginning to fix rollbacks."""
# Ensure to rerun only once to avoid infinite loops
# caused by a constantly changing state value at each run.
#
# Example: state.value += 1
if self._state["is_rerun"]:
self._state["is_rerun"] = False
elif self._state["hash"] is not None:
if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None):
self._state["is_rerun"] = True
self._state["session"].request_rerun()
self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None)
def _get_session():
session_id = get_report_ctx( | identifier_body |
|
app.py | erveja Blonde': 'Blonde Ale',
'Cerveja Trigo': 'Weiss (Trigo)',
'Cerveja APA': 'American Pale Ale',
'Cerveja IPA': 'India Pale Ale',
'Cerveja Session IPA': 'Session IPA',
'Cerveja NEIPA': 'New England IPA',
'Cerveja Porter': 'Porter/Stout',
'Cerveja Malzbier': 'Dunkel/Malzbier',
'Cerveja Witbier': 'Witbier',
'Cerveja Sour': 'Sour/Fruit',
'Cerveja RIS': 'Russian Imperial Stout',
'Cerveja Lambic': 'Lambic'
}
recommendations.replace({'product': rename_beer_styles}, inplace=True)
with st.spinner('Buscando cervejas...'):
df_cervejas = get_beer_list()
recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style')
recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False])
# st.dataframe(recommended_labels)
origins = recommended_labels['origin_state'].unique().tolist()
origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins)
filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)]
max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3)
df_style_1 = filtered_labels[filtered_labels['rank'] == 1]
df_style_2 = filtered_labels[filtered_labels['rank'] == 2]
df_style_3 = filtered_labels[filtered_labels['rank'] == 3]
markdown_list = []
image_list = []
for df_style in [df_style_1, df_style_2, df_style_3]:
if not df_style.empty:
df_style.reset_index(drop=True, inplace=True)
style_name = df_style['terabeer_style'][0]
style_rank = df_style['rank'][0]
style_score = df_style['score'][0]
style_description = df_style['style_description'][0]
style_harmonization = df_style['harmonization'][0]
if style_harmonization:
harmonization_line = f'<br><br> <b>Harmoniza bem com</b>: {style_harmonization}'
else:
harmonization_line = ''
style_markdown = f"""
<div>
<br>
<h2>
Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você)
</h2>
<br>
<p>
<b>Descrição</b>: {style_description} {harmonization_line}
</p>
<br>
</div>
"""
st.markdown(style_markdown, unsafe_allow_html=True)
markdown_list.append(style_markdown)
for index, row in df_style.iloc[0:max_beers, :].iterrows():
beer = row['name']
brewery = row['brand']
abv = row['abv']
ibu = row['ibu']
avg_rating = row['ratings_avg']
count_ratings = int(row['ratings_count'])
figure = row['figure']
ratings_source = row['ratings_source']
ratings_url = row['ratings_url']
origin_state = row['origin_state']
offer_url = row['offer_url']
discount_coupon = row['discount_coupon']
column1, column2 = st.beta_columns((1, 4))
with column1: # Column with beer labels
try:
st.image(f'fig/{figure}', use_column_width=True)
image_list.append(f'fig/{figure}')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
except FileNotFoundError:
st.image('fig/placeholder-image.jpg', use_column_width=True)
image_list.append('fig/placeholder-image.jpg')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
with column2: # Column with beer characteristics
ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>'
ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})'
ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível'
discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else ''
offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}'
beer_markdown = f"""
<div>
<h3>{beer} - {brewery}</h3>
<p>
<b>Origem</b>: {origin_state}<br>
<b>Nota média</b>: {ratings_line}<br>
<b>ABV</b>: {abv}% álcool <br>
<b>IBU</b>: {ibu_line} <br>
{offer_line}
</p>
</div>
"""
st.markdown(beer_markdown, unsafe_allow_html=True)
markdown_list.append(beer_markdown)
st.text("")
st.text("")
st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:")
email = st.text_input('')
if email:
st.markdown("### Qual seu nome?")
name = st.text_input(' ')
accept_beer_offers = st.checkbox(
'Aceito receber novidades do TeraBeer.',
True
)
allow_data_usage = st.checkbox(
'Permito que utilizem minhas respostas para melhorar recomendações futuras.',
True
)
st.text("")
if st.button('Enviar recomendações por email'):
with st.spinner(text='Enviando...'):
send_mail(email, name, markdown_list, image_list)
st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.')
if accept_beer_offers or allow_data_usage: # Try to send answers to database
db = DBFunctions()
try:
db.send_answers_to_db(
email=email,
name=name,
recommendations=recommendations,
df_paladar=df_paladar,
accept_beer_offers=accept_beer_offers,
allow_data_usage=allow_data_usage,
)
except KeyError:
pass
class _SessionState:
def __init__(self, session, hash_funcs):
"""Initialize SessionState instance."""
self.__dict__["_state"] = {
"data": {},
"hash": None,
"hasher": _CodeHasher(hash_funcs),
"is_rerun": False,
"session": session,
}
def __call__(self, **kwargs):
"""Initialize state data once."""
for item, value in kwargs.items():
if item not in self._state["data"]:
self._state["data"][item] = value
def __getitem__(self, item):
"""Return a saved state value, None if item is undefined."""
return self._state["data"].get(item, None)
def __getattr__(self, item):
"""Return a saved state value, None if item is undefined."""
return self._state["data"].get(item, None)
def __setitem__(self, item, value):
"""Set state value."""
self._state["data"][item] = value
def __setattr__(self, item, value):
"""Set state value."""
self._state["data"][item] = value
def clear(self):
"""Clear session state and request a rerun."""
self._state["data"].clear()
self._state["session"].request_rerun()
def sync(self):
"""Rerun the app with all state values up to date from the beginning to fix rollbacks."""
# Ensure to rerun only once to avoid infinite loops
# caused by a constantly changing state value at each run.
#
# Example: state.value += 1
if self._state["is_rerun"]:
self._state["is_rerun"] = False
elif self._state["hash"] is not None:
if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None):
self._state["is_rerun"] = True
self._state["session"].request_rerun()
self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None)
def _get_session():
session_id = get_report_ctx().session_ | id
sess | identifier_name |
|
app.py | ("")
st.text("")
st.text("")
if st.button('Gerar recomendações'):
model = load_model('data/recommending_system')
if len(recommendable_beers) == 0:
st.error('Não temos nenhuma cerveja para te recomendar :/')
else:
with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'):
sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast
recommendations = model.recommend(
users=[-1],
k=3,
items=recommendable_beers,
new_observation_data=SFrame(new_observation_data),
exclude_known=exclude_known,
).to_dataframe()
# st.dataframe(recommendations)
if recommendations.empty and exclude_known:
st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?')
else:
st.success('Pronto! Selecione no menu à esquerda a página Recomendações.')
sleep(3)
state.recommendations, state.paladar = recommendations, df_paladar
def display_sugestoes(state):
st.title(':beers: CERVEJAS RECOMENDADAS')
st.markdown('''
Estas são as cervejas artesanais brasileiras **mais recomendadas para você**.
Ao final, você poderá enviar a lista de cervejas para o seu e-mail.
''')
recommendations, df_paladar = state.recommendations, state.paladar
# st.dataframe(df_paladar)
# st.dataframe(recommendations)
if not isinstance(recommendations, pd.DataFrame):
st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.')
else:
rename_beer_styles = {
'Cerveja Blonde': 'Blonde Ale',
'Cerveja Trigo': 'Weiss (Trigo)',
'Cerveja APA': 'American Pale Ale',
'Cerveja IPA': 'India Pale Ale',
'Cerveja Session IPA': 'Session IPA',
'Cerveja NEIPA': 'New England IPA',
'Cerveja Porter': 'Porter/Stout',
'Cerveja Malzbier': 'Dunkel/Malzbier',
'Cerveja Witbier': 'Witbier',
'Cerveja Sour': 'Sour/Fruit',
'Cerveja RIS': 'Russian Imperial Stout',
'Cerveja Lambic': 'Lambic'
}
recommendations.replace({'product': rename_beer_styles}, inplace=True)
with st.spinner('Buscando cervejas...'):
df_cervejas = get_beer_list()
recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style')
recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False])
# st.dataframe(recommended_labels)
origins = recommended_labels['origin_state'].unique().tolist()
origin_filter = st.multiselect("Filtrar por estado:", origins, default=origins)
filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)]
max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3)
df_style_1 = filtered_labels[filtered_labels['rank'] == 1]
df_style_2 = filtered_labels[filtered_labels['rank'] == 2]
df_style_3 = filtered_labels[filtered_labels['rank'] == 3]
markdown_list = []
image_list = []
for df_style in [df_style_1, df_style_2, df_style_3]:
if not df_style.empty:
df_style.reset_index(drop=True, inplace=True)
style_name = df_style['terabeer_style'][0]
style_rank = df_style['rank'][0]
style_score = df_style['score'][0]
style_description = df_style['style_description'][0]
style_harmonization = df_style['harmonization'][0]
if style_harmonization:
harmonization_line = f'<br><br> <b>Harmoniza bem com</b>: {style_harmonization}'
else:
harmonization_line = ''
style_markdown = f"""
<div>
<br>
<h2>
Estilo {style_rank}: <b>{style_name}</b> ({style_score:.1%} recomendado para você)
</h2>
<br>
<p>
<b>Descrição</b>: {style_description} {harmonization_line}
</p>
<br>
</div>
"""
st.markdown(style_markdown, unsafe_allow_html=True)
markdown_list.append(style_markdown)
for index, row in df_style.iloc[0:max_beers, :].iterrows():
beer = row['name']
brewery = row['brand']
abv = row['abv']
ibu = row['ibu']
avg_rating = row['ratings_avg']
count_ratings = int(row['ratings_count'])
figure = row['figure']
ratings_source = row['ratings_source']
ratings_url = row['ratings_url']
origin_state = row['origin_state']
offer_url = row['offer_url']
discount_coupon = row['discount_coupon']
column1, column2 = st.beta_columns((1, 4))
with column1: # Column with beer labels
try:
st.image(f'fig/{figure}', use_column_width=True)
image_list.append(f'fig/{figure}')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
except FileNotFoundError:
st.image('fig/placeholder-image.jpg', use_column_width=True)
image_list.append('fig/placeholder-image.jpg')
markdown_list.append(
f"""
<br>
<div>
<img
src="cid:image{len(image_list)}"
alt="Logo"
style="width:200px;height:200px;">
</div>
"""
)
with column2: # Column with beer characteristics
ratings_source_url = f'<a href="{ratings_url}" target="_blank">{ratings_source}</a>'
ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})'
ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível'
discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else ''
offer_line = f'<b><a href="{offer_url}" target="_blank">Quero!</a></b> {discount_phrase}'
beer_markdown = f"""
<div>
<h3>{beer} - {brewery}</h3>
<p>
<b>Origem</b>: {origin_state}<br>
<b>Nota média</b>: {ratings_line}<br>
<b>ABV</b>: {abv}% álcool <br>
<b>IBU</b>: {ibu_line} <br>
{offer_line}
</p>
</div>
"""
st.markdown(beer_markdown, unsafe_allow_html=True)
markdown_list.append(beer_markdown)
st.text("")
st.text("")
st.markdown("### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:")
email = st.text_input('')
if email:
st.markdown("### Qual seu nome?")
name = st.text_input(' ')
accept_beer_offers = st.checkbox(
'Aceito receber novidades do TeraBeer.',
True
)
allow_data_usage = st.checkbox(
'Permito que utilizem minhas respostas para melhorar recomendações futuras.',
True
)
st.text("")
if st.button('Enviar recomendações por email'):
with st.spinner(text='Enviando...'):
send_mail(email, name, markdown_list, image_list)
st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.')
if accept_beer_offers or allow_data_usage: # Try to send answers to database
db = DBFunctions()
try:
db.send_answers_to_db(
email=email,
name=name,
recommendations=recommendations,
df_paladar=df_paladar,
accept_beer_offers=accept_beer_offers,
allow_data_usage=allow_data_usage,
)
except KeyError:
pass
class _SessionState:
def __init__(self, session, hash_funcs):
"""Initialize SessionState instance."""
self.__dict__["_state"] = {
"data": {},
"hash": None,
| "hasher": _CodeHasher(hash_funcs),
"is_rerun": False,
"session": session,
}
| random_line_split |
|
packet_decoder.py | (self):
p = Packet()
p.direction = self.direction
p.ident = self.ident
p.data = self.data.copy()
return p
SLOT_EXTRA_DATA_IDS = [
0x103, 0x105, 0x15A, 0x167,
0x10C, 0x10D, 0x10E, 0x10F, 0x122,
0x110, 0x111, 0x112, 0x113, 0x123,
0x10B, 0x100, 0x101, 0x102, 0x124,
0x114, 0x115, 0x116, 0x117, 0x125,
0x11B, 0x11C, 0x11D, 0x11E, 0x126,
0x12A, 0x12B, 0x12C, 0x12D,
0x12E, 0x12F, 0x130, 0x131,
0x132, 0x133, 0x134, 0x135,
0x136, 0x137, 0x138, 0x139,
0x13A, 0x13B, 0x13C, 0x13D ]
data_types = {
"ubyte": ('B', 1),
"byte": ('b', 1),
"bool": ('?', 1),
"short": ('h', 2),
"float": ('f', 4),
"int": ('i', 4),
"double": ('d', 8),
"long": ('q', 8)
}
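# Each entry maps a logical type name to (struct format character, size in bytes);
# e.g. data_types["short"] == ('h', 2). unpack_array_fast() below uses the pair
# directly, while unpack_real('h', 2) evaluates struct.unpack_from('!h', self.buff)
# and consumes two bytes from the buffer.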
names = {
0x00: "Keep-alive",
0x01: "Login request",
0x02: "Handshake",
0x03: "Chat message",
0x04: "Time update",
0x05: "Entity Equipment",
0x06: "Spawn position",
0x07: "Use entity",
0x08: "Update health",
0x09: "Respawn",
0x0A: "Player",
0x0B: "Player position",
0x0C: "Player look",
0x0D: "Player position & look",
0x0E: "Player digging",
0x0F: "Player block placement",
0x10: "Holding change",
0x11: "Use bed",
0x12: "Animation",
0x13: "Entity action",
0x14: "Named entity spawn",
0x15: "Pickup spawn",
0x16: "Collect item",
0x17: "Add object or vehicle",
0x18: "Mob spawn",
0x19: "Entity: painting",
0x1A: "Experience Orb",
0x1B: "Stance update (DEPRECATED)",
0x1C: "Entity velocity",
0x1D: "Destroy entity",
0x1E: "Entity",
0x1F: "Entity relative move",
0x20: "Entity look",
0x21: "Entity look and relative move",
0x22: "Entity teleport",
0x23: "Entity head look",
0x26: "Entity status",
0x27: "Attach entity",
0x28: "Entity metadata",
0x29: "Entity effect",
0x2A: "Remove entity effect",
0x2B: "Experience",
0x32: "Pre-chunk",
0x33: "Map chunk",
0x34: "Multi-block change",
0x35: "Block change",
0x36: "Block action",
0x3C: "Explosion",
0x3D: "Sound effect",
0x46: "New or invalid state",
0x47: "Thunderbolt",
0x64: "Open window",
0x65: "Close window",
0x66: "Window click",
0x67: "Set slot",
0x68: "Window items",
0x69: "Update progress bar",
0x6A: "Transaction",
0x6B: "Creative inventory action",
0x6C: "Enchant Item",
0x82: "Update sign",
0x83: "Map data",
0x84: "Update tile entity",
0xCA: "Player Abilities",
0xC8: "Increment statistic",
0xC9: "Player List Item",
0xFA: "Plugin message",
0xFE: "Server list ping",
0xFF: "Disconnect"
}
structs = {
#Keep-alive
0x00: ("int", "keep_alive_id"),
#Login request
0x01: {
CLIENT_TO_SERVER: (
("int", "protocol_version"),
("string16", "username"),
("string16", "level type"),
("int", "server_mode"),
("int", "dimension"),
("byte", "difficulty"),
("ubyte", "world_height"),
("ubyte", "max_players")),
SERVER_TO_CLIENT: (
("int", "entity_id"),
("string16", "unknown"),
("string16", "level type"),
("int", "server_mode"),
("int", "dimension"),
("byte", "difficulty"),
("ubyte", "world_height"),
("ubyte", "max_players"))},
#Handshake
0x02: {
CLIENT_TO_SERVER: ("string16", "username"),
SERVER_TO_CLIENT: ("string16", "connection_hash")},
#Chat message
0x03: ("string16", "text"),
#Time update
0x04: ("long", "time"),
#Entity Equipment
0x05: (
("int", "entity_id"),
("short", "slot"),
("short", "item_id"),
("short", "damage")),
#Spawn position
0x06: (
("int", "x"),
("int", "y"),
("int", "z")),
#Use entity
0x07: (
("int", "subject_entity_id"),
("int", "object_entity_id"),
("bool", "left_click")),
#Update health
0x08: (
("short", "health"),
("short", "food"),
("float", "food_saturation")),
#Respawn
0x09: (
("int", "dimension"),
("byte", "difficulty"),
("byte", "server_mode"),
("short", "world_height"),
("string16", "level_type")),
#Player
0x0A: ("bool", "on_ground"),
#Player position
0x0B: (
("double", "x"),
("double", "y"),
("double", "stance"),
("double", "z"),
("bool", "on_ground")),
#Player look
0x0C: (
("float", "yaw"),
("float", "pitch"),
("bool", "on_ground")),
#Player position & look
0x0D: {
CLIENT_TO_SERVER: (
("double", "x"),
("double", "y"),
("double", "stance"),
("double", "z"),
("float", "yaw"),
("float", "pitch"),
("bool", "on_ground")),
SERVER_TO_CLIENT: (
("double", "x"),
("double", "stance"),
("double", "y"),
("double", "z"),
("float", "yaw"),
("float", "pitch"),
("bool", "on_ground"))},
#Player digging
0x0E: (
("byte", "status"),
("int", "x"),
("byte", "y"),
("int", "z"),
("byte", "face")),
| copy | identifier_name |
|
packet_decoder.py | mtype != 127:
mtype2 = mtype >> 5
t = 0
if mtype2 == 0: t = self.unpack('byte')
if mtype2 == 1: t = self.unpack('short')
if mtype2 == 2: t = self.unpack('int')
if mtype2 == 3: t = self.unpack('float')
if mtype2 == 4: t = self.unpack('string16')
if mtype2 == 5:
t = {}
t["id"] = self.unpack('short')
t["count"] = self.unpack('byte')
t["damage"] = self.unpack('short')
if mtype2 == 6:
t = []
for i in range(3):
s = self.unpack('int')
t.append(s)
t = (mtype, t)
o.append(t)
mtype = self.unpack('byte')
return o
def unpack_real(self, data_type, length):
"""A helper function for unpack(), it handles any data type that is understood by the struct module."""
if len(self.buff) < length:
raise IncompleteData()
o = struct.unpack_from('!'+data_type, self.buff)[0]
self.buff = self.buff[length:]
return o
def pack_real(self, data_type, data):
return struct.pack('!'+data_type, data)
def unpack_array(self, data_type, count):
a = []
for i in range(count):
a.append(self.unpack(data_type))
return a
def pack_array(self, data_type, data):
o = ''
for d in data:
o += self.pack(data_type, d)
return o
def unpack_array_fast(self, data_type, count):
data_type = data_types[data_type]
if len(self.buff) < count*data_type[1]:
raise IncompleteData()
o = struct.unpack_from(data_type[0]*count, self.buff)
self.buff = self.buff[count*data_type[1]:]
return o
def pack_array_fast(self, data_type, data):
data_type = data_types[data_type]
return struct.pack(data_type[0]*len(data), *data)
def read_packet(self):
"""Reads the bytestring in self.buff, and returns the first packet contained within it.
Sets self.buff to remaining bytestring.
If the packet is incomplete, returns None, but may raise if it thinks a genuinely malformed packet has been received.
"""
#self.debug("READ BUFFER SIZE: %d" % len(self.buff))
backup = self.buff[:]
packet = Packet()
try:
packet.direction = self.node
packet.ident = self.unpack('ubyte')
#Defined structs from huge dict
for datatype, name in self.get_struct(packet):
# this populates packet.data with {name: value}
packet.data[name] = self.unpack(datatype)
# I believe the following are packet-type specific fixes for variable-length packets.
#0x17
if packet.ident == 0x17:
if packet.data['unknown'] > 0:
packet.data['x2'] = self.unpack('short')
packet.data['y2'] = self.unpack('short')
packet.data['z2'] = self.unpack('short')
#0x33
if packet.ident in (0x33, 0x34):
packet.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])
del packet.data["data_size"]
# #0x34
# if packet.ident == 0x34:
# coords = self.unpack_array_fast('short', packet.data['data_size'])
# btype = self.unpack_array_fast('byte', packet.data['data_size'])
# metadata = self.unpack_array_fast('byte', packet.data['data_size'])
# packet.data["blocks"] = []
# for i in zip(coords, btype, metadata):
# block = {}
# block["x"] = i[0] >> 12
# block["z"] = 0x0F & i[0] >> 8
# block["y"] = 0xFF & i[0]
# block["type"] = i[1]
# block["metadata"] = i[2]
# packet.data["blocks"].append(block)
# del packet.data["data_size"]
#0x3C
if packet.ident == 0x3C:
records = self.unpack_array_fast('byte', packet.data['data_size']*3)
i = 0
packet.data["blocks"] = []
while i < packet.data['data_size']*3:
packet.data["blocks"].append(dict(zip(('x','y','z'), records[i:i+3])))
i+=3
del packet.data["data_size"]
#0x68
if packet.ident == 0x68:
packet.data["slots_data"] = self.unpack_array('slot', packet.data["data_size"])
del packet.data["data_size"]
#0x82:
if packet.ident == 0x82:
packet.data["text"] = []
for i in range(4):
packet.data["text"].append(packet.data["line_%s" % (i+1)])
#0x83
if packet.ident == 0x83:
packet.data["data"] = self.unpack_array_fast('byte', packet.data['data_size'])
del packet.data["data_size"]
# Sets packet.original to the byte string that the packet was decoded from.
packet.original = backup[:len(backup) - len(self.buff)]
return packet
except IncompleteData:
self.buff = backup
return None
except Exception, ex:
self.buff = backup
ex.args += (self.buff[20:],)
raise
def encode_packet(self, packet):
"""Takes a packet, and returns the encoded bytestring representing it."""
try:
output = self.pack('ubyte', packet.ident)
append = ''
#0x17
if packet.ident == 0x17:
if packet.data['unknown'] > 0:
for i in ('x2','y2','z2'):
append += self.pack('short', packet.data[i])
#0x33
if packet.ident in (0x33, 0x34):
packet.data['data_size'] = len(packet.data['data'])
append += self.pack_array_fast('byte', packet.data['data'])
# #0x34
# if packet.ident == 0x34:
# coords = []
# btypes = []
# metadata = []
# for i in packet.data['blocks']:
# coords.append(i['x'] << 12 | i['z'] << 8 | i['y'])
# btypes.append(i['type'])
# metadata.append(i['metadata'])
#
# packet.data['data_size'] = len(coords)
# append += self.pack_array_fast('short', coords)
# append += self.pack_array_fast('byte', btypes)
# append += self.pack_array_fast('byte', metadata)
#0x3C
if packet.ident == 0x3C:
array = []
for i in packet.data['blocks']:
array += [i['x'], i['y'], i['z']]
packet.data['data_size'] = len(packet.data['blocks'])
append += self.pack_array_fast('byte', array)
#0x68
if packet.ident == 0x68:
packet.data['data_size'] = len(packet.data['slots_data'])
append += self.pack_array('slot', packet.data['slots_data'])
#0x82: Sign
if packet.ident == 0x82:
for i in range(4):
packet.data["line_%s" % (i+1)] = packet.data["text"][i]
#0x83
if packet.ident == 0x83:
packet.data['data_size'] = len(packet.data['data'])
append += self.pack_array_fast('byte', packet.data['data'])
for i in self.get_struct(packet):
output += self.pack(i[0], packet.data[i[1]])
output += append
return output
except Exception:
raise
def stateless_unpack(buff, to_server):
"""A wrapper about the normal objects, that lets you unpack encoded packets easily.
Returns (packet, remaining_buff), where remaining_buff is the given buffer without the bytes eaten by the packet.
If no more packets can be read from buff, returns (None, buff).
"""
decoder = PacketDecoder(to_server)
decoder.buff = buff
packet = decoder.read_packet()
return packet, decoder.buff
def stateless_pack(packet, to_server):
| """A wrapper about the normal objects, that lets you pack decoded packets easily.
Returns the bytestring that represents the packet."""
decoder = PacketDecoder(to_server)
return decoder.encode_packet(packet) | identifier_body |
|
packet_decoder.py | ("short", "y_velocity"),
("short", "z_velocity")),
#Destroy entity
0x1D: ("int", "entity_id"),
#Entity
0x1E: ("int", "entity_id"),
#Entity relative move
0x1F: (
("int", "entity_id"),
("byte", "x_change"),
("byte", "y_change"),
("byte", "z_change")),
#Entity look
0x20: (
("int", "entity_id"),
("byte", "yaw"),
("byte", "pitch")),
#Entity look and relative move
0x21: (
("int", "entity_id"),
("byte", "x_change"),
("byte", "y_change"),
("byte", "z_change"),
("byte", "yaw"),
("byte", "pitch")),
#Entity teleport
0x22: (
("int", "entity_id"),
("int", "x"),
("int", "y"),
("int", "z"),
("byte", "yaw"),
("byte", "pitch")),
# Entity head look
0x23: (
("int", "entity_id"),
("byte", "head yaw")),
#Entity status
0x26: (
("int", "entity_id"),
("byte", "status")),
#Attach entity
0x27: (
("int", "subject_entity_id"),
("int", "object_entity_id")),
#Entity metadata
0x28: (
("int", "entity_id"),
("metadata", "metadata")),
# Entity effect
0x29: (
("int", "entity_id"),
("byte", "effect_id"),
("byte", "amplifier"),
("short", "duration")),
# remove entity effect
0x2A: (
("int", "entity_id"),
("byte", "effect_id")),
# Experience
0x2B: (
("float", "experience_bar"),
("short", "level"),
("short", "total_experience")),
#Pre-chunk
0x32: (
("int", "x"),
("int", "z"),
("bool", "load")),
#Map chunk
0x33: (
("int", "x"),
("int", "z"),
("bool", "contiguous"),
("short", "bitmap"),
("short", "add_bitmap"),
("int", "data_size"),
("int", "unknown")),
#Multi-block change
0x34: (
("int", "x_chunk"),
("int", "z_chunk"),
("short", "record_count"),
("int", "data_size")),
#Block change
0x35: (
("int", "x"),
("byte", "y"),
("int", "z"),
("byte", "id"),
("byte", "metadata")),
#Block action
0x36: (
("int", "x"),
("short", "y"),
("int", "z"),
("byte", "type_state"),
("byte", "pitch_direction")),
#Explosion
0x3C: (
("double", "x"),
("double", "y"),
("double", "z"),
("float", "unknown"),
("int", "data_size")),
#Sound effect
0x3D: (
("int", "effect_id"),
("int", "x"),
("byte", "y"),
("int", "z"),
("int", "extra")),
#New or invalid state
0x46: (
("byte", "reason"),
("byte", "gamemode")),
#Thunderbolt
0x47: (
("int", "entity_id"),
("bool", "unknown"),
("int", "x"),
("int", "y"),
("int", "z")),
#Open window
0x64: (
("byte", "window_id"),
("byte", "inventory_type"),
("string16", "window_title"),
("byte", "slots_count")),
#Close window
0x65: ("byte", "window_id"),
#Window click
0x66: (
("byte", "window_id"),
("short", "slot"),
("byte", "right_click"),
("short", "transaction_id"),
("bool", "shift"),
("slot", "slot_data")),
#Set slot
0x67: (
("byte", "window_id"),
("short", "slot"),
("slot", "slot_data")),
#Window items
0x68: (
("byte", "window_id"),
("short", "data_size")),
#Update progress bar
0x69: (
("byte", "window_id"),
("short", "progress_bar_type"),
("short", "progress")),
#Transaction
0x6A: (
("byte", "window_id"),
("short", "transaction_id"),
("bool", "accepted")),
# Creative Inventory Action
0x6B: (
("short", "slot"),
("slot", "slot_data")),
# Enchant Item
0x6C: (
("byte", "window_id"),
("byte", "enchantment")),
#Update sign
0x82: (
("int", "x"),
("short", "y"),
("int", "z"),
("string16", "line_1"),
("string16", "line_2"),
("string16", "line_3"),
("string16", "line_4")),
#Map data
0x83: (
("short", "unknown1"),
("short", "map_id"),
("ubyte", "data_size")),
#Update Tile Entity
0x84: (
("int", "x"),
("short", "y"),
("int", "z"),
("byte", "action"),
("int", "custom1"),
("int", "custom2"),
("int", "custom3")),
# Player Abilities
0xCA: (
("bool", "invulnerable"),
("bool", "flying"),
("bool", "can_fly"),
("bool", "instant_destroy")),
#Increment statistic
0xC8: (
("int", "statistic_id"),
("byte", "amount")),
# Player List Item
0xC9: (
("string16", "player_name"),
("bool", "online"),
("short", "ping")),
#Server list ping
0xFE: (),
#Disconnect
0xFF: ("string16", "reason")}
class PacketDecoder:
def __init__(self, to_server):
self.buff = ''
self.error_count = 0
self.node = CLIENT_TO_SERVER if to_server else SERVER_TO_CLIENT
self.iPacketCounter = 0
def get_struct(self, packet):
"""Reads ident and direction from packet, and returns the associated struct description from structs global.
Normalises return to be a ((str, str), ...)"""
o = structs[packet.ident]
if isinstance(o, dict):
o = o[packet.direction]
if len(o) and not isinstance(o[0], tuple):
o = (o),
return o
def pack(self, data_type, data):
if data_type in data_types:
format = data_types[data_type]
return self.pack_real(format[0], data)
if data_type == "string8": return self.pack("short", len(data)) + data
if data_type == "string16": return self.pack("short", len(data)) + data.encode('utf-16be')
if data_type == "slot":
o = self.pack('short', data['id'])
if data['id'] > 0:
o += self.pack('byte', data['amount'])
o += self.pack('short', data['damage'])
if 'extra' in data:
nbtdata = data['extra']
if nbtdata is None:
o += self.pack('short', -1)
else:
nbt_len = len(nbtdata)
o += self.pack('short', nbt_len)
o += nbtdata
return o
if data_type == "metadata":
o = ''
for mtype, val in data:
mtype2 = mtype >> 5
o += self.pack('byte', mtype)
if mtype2 == 0: | o += self.pack('byte', val) | conditional_block |
|
packet_decoder.py | ('byte', val['count'])
o += self.pack('short', val['damage'])
if mtype2 == 6:
for i in range(3):
o += self.pack('int', val[i])
o += self.pack('byte', 127)
return o
def unpack(self, data_type):
"""Reads buff (consuming bytes) and returns the unpacked value according to the given type."""
if data_type in data_types:
format = data_types[data_type]
return self.unpack_real(format[0], format[1])
if data_type == "string8":
length = self.unpack('short')
if length < 0:
raise Exception("Negative length for string")
if len(self.buff) < length:
raise IncompleteData()
string = self.buff[:length]
self.buff = self.buff[length:]
return string
if data_type == "string16":
length = self.unpack('short')
if length < 0:
raise Exception("Negative length for string")
if len(self.buff) < 2*length:
raise IncompleteData()
string = self.buff[:2*length].decode('utf-16be')
self.buff = self.buff[2*length:]
return string
if data_type == "slot":
o = {}
o["id"] = self.unpack('short')
if o["id"] > 0:
o["amount"] = self.unpack('byte')
o["damage"] = self.unpack('short')
if o["id"] in SLOT_EXTRA_DATA_IDS:
extra_len = self.unpack('short')
if extra_len <= 0:
o["extra"] = None
else:
if len(self.buff) < extra_len:
raise IncompleteData()
extra_buff = self.buff[:extra_len]
self.buff = self.buff[extra_len:]
o["extra"] = extra_buff
return o
if data_type == "metadata":
#[(17, 0), (0, 0), (16, -1)]
o = []
mtype = self.unpack('byte')
while mtype != 127:
mtype2 = mtype >> 5
t = 0
if mtype2 == 0: t = self.unpack('byte')
if mtype2 == 1: t = self.unpack('short')
if mtype2 == 2: t = self.unpack('int')
if mtype2 == 3: t = self.unpack('float')
if mtype2 == 4: t = self.unpack('string16')
if mtype2 == 5:
t = {}
t["id"] = self.unpack('short')
t["count"] = self.unpack('byte')
t["damage"] = self.unpack('short')
if mtype2 == 6:
t = []
for i in range(3):
s = self.unpack('int')
t.append(s)
t = (mtype, t)
o.append(t)
mtype = self.unpack('byte')
return o
def unpack_real(self, data_type, length):
"""A helper function for unpack(), it handles any data type that is understood by the struct module."""
if len(self.buff) < length:
raise IncompleteData()
o = struct.unpack_from('!'+data_type, self.buff)[0]
self.buff = self.buff[length:]
return o
def pack_real(self, data_type, data):
return struct.pack('!'+data_type, data)
def unpack_array(self, data_type, count):
a = []
for i in range(count):
a.append(self.unpack(data_type))
return a
def pack_array(self, data_type, data):
o = ''
for d in data:
o += self.pack(data_type, d)
return o
def unpack_array_fast(self, data_type, count):
data_type = data_types[data_type]
if len(self.buff) < count*data_type[1]:
raise IncompleteData()
o = struct.unpack_from(data_type[0]*count, self.buff)
self.buff = self.buff[count*data_type[1]:]
return o
def pack_array_fast(self, data_type, data):
data_type = data_types[data_type]
return struct.pack(data_type[0]*len(data), *data)
def read_packet(self):
"""Reads the bytestring in self.buff, and returns the first packet contained within it.
Sets self.buff to remaining bytestring.
If packet is incomplete, returns None. But may raise if it thinks a real malformed packet has been received.
"""
#self.debug("READ BUFFER SIZE: %d" % len(self.buff))
backup = self.buff[:]
packet = Packet()
try:
packet.direction = self.node
packet.ident = self.unpack('ubyte')
#Defined structs from huge dict
for datatype, name in self.get_struct(packet):
# this populates packet.data with {name: value}
packet.data[name] = self.unpack(datatype)
# I believe the following are packet-type specific fixes for variable-length packets.
#0x17
if packet.ident == 0x17:
if packet.data['unknown'] > 0:
packet.data['x2'] = self.unpack('short')
packet.data['y2'] = self.unpack('short')
packet.data['z2'] = self.unpack('short')
#0x33
if packet.ident in (0x33, 0x34):
packet.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])
del packet.data["data_size"]
# #0x34
# if packet.ident == 0x34:
# coords = self.unpack_array_fast('short', packet.data['data_size'])
# btype = self.unpack_array_fast('byte', packet.data['data_size'])
# metadata = self.unpack_array_fast('byte', packet.data['data_size'])
# packet.data["blocks"] = []
# for i in zip(coords, btype, metadata):
# block = {}
# block["x"] = i[0] >> 12
# block["z"] = 0x0F & i[0] >> 8
# block["y"] = 0xFF & i[0]
# block["type"] = i[1]
# block["metadata"] = i[2]
# packet.data["blocks"].append(block)
# del packet.data["data_size"]
#0x3C
if packet.ident == 0x3C:
records = self.unpack_array_fast('byte', packet.data['data_size']*3)
i = 0
packet.data["blocks"] = []
while i < packet.data['data_size']*3:
packet.data["blocks"].append(dict(zip(('x','y','z'), records[i:i+3])))
i+=3
del packet.data["data_size"]
#0x68
if packet.ident == 0x68:
packet.data["slots_data"] = self.unpack_array('slot', packet.data["data_size"])
del packet.data["data_size"]
#0x82:
if packet.ident == 0x82:
packet.data["text"] = []
for i in range(4):
packet.data["text"].append(packet.data["line_%s" % (i+1)])
#0x83
if packet.ident == 0x83:
packet.data["data"] = self.unpack_array_fast('byte', packet.data['data_size'])
del packet.data["data_size"]
# Sets packet.original to the byte string that the packet was decoded from.
packet.original = backup[:len(backup) - len(self.buff)]
return packet
except IncompleteData:
self.buff = backup
return None
except Exception, ex:
self.buff = backup
ex.args += (self.buff[20:],)
raise
def encode_packet(self, packet):
"""Takes a packet, and returns the encoded bytestring representing it."""
try:
output = self.pack('ubyte', packet.ident)
append = ''
#0x17
if packet.ident == 0x17:
if packet.data['unknown'] > 0:
for i in ('x2','y2','z2'):
append += self.pack('short', packet.data[i])
#0x33
if packet.ident in (0x33, 0x34):
packet.data['data_size'] = len(packet.data['data'])
append += self.pack_array_fast('byte', packet.data['data'])
# #0x34
# if packet.ident == 0x34: | # coords = []
# btypes = []
# metadata = []
# for i in packet.data['blocks']:
# coords.append(i['x'] << 12 | i['z'] << 8 | i['y']) | random_line_split |
|
lib.rs | from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v { true } else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
}
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a `WaitMap` key-value pair.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::{Ref, WaitMap};
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new(); | /// let emma = "Emma Goldman".to_string();
///
/// map.insert(emma.clone(), 0);
/// let kv: Ref<String, i32, _> = map.get(&emma).unwrap();
/// | random_line_split |
|
lib.rs | Duration;
//! # use std::sync::Arc;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new());
//! let map1 = map.clone();
//!
//! let handle = task::spawn(async move {
//! let result = map.wait("Voltairine de Cleyre").await;
//! assert!(result.is_none());
//! });
//!
//! task::spawn(async move {
//! task::sleep(Duration::from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v { true } else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) |
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a | {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
} | identifier_body |
lib.rs | use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v { true } else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
}
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a `WaitMap` key-value pair.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::{Ref, WaitMap};
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let emma = "Emma Goldman".to_string();
///
/// map.insert(emma.clone(), 0);
/// let kv: Ref<String, i32, _> = map.get(&emma).unwrap();
///
/// assert!(*kv.key() == emma);
/// assert!(*kv.value() == 0);
/// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0));
/// # Ok(())
/// # }
/// ```
pub struct Ref<'a, K, V, S> {
inner: one::Ref<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
}
/// An exclusive reference to a `WaitMap` key-value pair.
pub struct RefMut<'a, K, V, S> {
inner: one::RefMut<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn | value_mut | identifier_name |
|
lib.rs | Duration;
//! # use std::sync::Arc;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new());
//! let map1 = map.clone();
//!
//! let handle = task::spawn(async move {
//! let result = map.wait("Voltairine de Cleyre").await;
//! assert!(result.is_none());
//! });
//!
//! task::spawn(async move {
//! task::sleep(Duration::from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q: ?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q: ?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v | else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
}
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a ` | { true } | conditional_block |
controller_test.go | Equals, name)
c.Assert(app.ID, Not(Equals), "")
if id != "" {
c.Assert(app.ID, Equals, id)
}
c.Assert(app.Protected, Equals, true)
c.Assert(app.Meta["foo"], Equals, "bar")
gotApp := &ct.App{}
res, err := s.Get("/apps/"+app.ID, gotApp)
c.Assert(err, IsNil)
c.Assert(gotApp, DeepEquals, app)
res, err = s.Get("/apps/"+app.Name, gotApp)
c.Assert(err, IsNil)
c.Assert(gotApp, DeepEquals, app)
res, err = s.Get("/apps/fail"+app.ID, gotApp)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestUpdateApp(c *C) {
meta := map[string]string{"foo": "bar"}
app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta})
c.Assert(app.Protected, Equals, false)
c.Assert(app.Meta, DeepEquals, meta)
gotApp := &ct.App{}
res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, true)
c.Assert(gotApp.Meta, DeepEquals, meta)
meta = map[string]string{"foo": "baz", "bar": "foo"}
res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, false)
c.Assert(gotApp.Meta, DeepEquals, meta)
res, err = s.Get("/apps/"+app.ID, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, false)
c.Assert(gotApp.Meta, DeepEquals, meta)
}
func (s *S) TestDeleteApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "delete-app"})
path := "/apps/" + app.ID
res, err := s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
res, err = s.Get(path, app)
c.Assert(res.StatusCode, Equals, 404)
}
func (s *S) TestRecreateApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "recreate-app"})
// Post a duplicate
res, err := s.Post("/apps", app, &ct.App{})
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error
// Delete the original
path := "/apps/" + app.ID
res, err = s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
// Create the same key
app = s.createTestApp(c, &ct.App{Name: "recreate-app"})
c.Assert(app.Name, Equals, "recreate-app")
}
func (s *S) TestProtectedApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true})
release := s.createTestRelease(c, &ct.Release{
Processes: map[string]ct.ProcessType{"web": {}, "worker": {}},
})
path := formationPath(app.ID, release.ID)
for _, t := range []struct {
procs map[string]int
status int
}{
{nil, 400},
{map[string]int{"web": 1}, 400},
{map[string]int{"worker": 1, "web": 0}, 400},
{map[string]int{"worker": 1, "web": 1}, 200},
} {
res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil)
c.Assert(err, IsNil)
res.Body.Close()
c.Assert(res.StatusCode, Equals, t.status)
}
}
func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact {
out := &ct.Artifact{}
res, err := s.Post("/artifacts", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) TestCreateArtifact(c *C) {
for i, id := range []string{"", utils.UUID()} {
in := &ct.Artifact{
ID: id,
Type: "docker-image",
URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i),
}
out := s.createTestArtifact(c, in)
c.Assert(out.Type, Equals, in.Type)
c.Assert(out.URI, Equals, in.URI)
c.Assert(out.ID, Not(Equals), "")
if id != "" {
c.Assert(out.ID, Equals, id)
}
gotArtifact := &ct.Artifact{}
res, err := s.Get("/artifacts/"+out.ID, gotArtifact)
c.Assert(err, IsNil)
c.Assert(gotArtifact, DeepEquals, out)
res, err = s.Get("/artifacts/fail"+out.ID, gotArtifact)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release {
if in.ArtifactID == "" {
in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID
}
out := &ct.Release{}
res, err := s.Post("/releases", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key {
out := &ct.Key{}
res, err := s.Post("/keys", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) TestCreateRelease(c *C) {
for _, id := range []string{"", utils.UUID()} {
in := &ct.Release{ID: id}
out := s.createTestRelease(c, in)
c.Assert(out.ArtifactID, Equals, in.ArtifactID)
if id != "" {
c.Assert(out.ID, Equals, id)
}
gotRelease := &ct.Release{}
res, err := s.Get("/releases/"+out.ID, gotRelease)
c.Assert(err, IsNil) | res, err = s.Get("/releases/fail"+out.ID, gotRelease)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestCreateFormation(c *C) {
for i, useName := range []bool{false, true} {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)})
in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}}
if useName {
in.AppID = app.Name
}
out := s.createTestFormation(c, in)
c.Assert(out.AppID, Equals, app.ID)
c.Assert(out.ReleaseID, Equals, release.ID)
c.Assert(out.Processes["web"], Equals, 1)
gotFormation := &ct.Formation{}
var path string
if useName {
path = formationPath(app.Name, release.ID)
} else {
path = formationPath(app.ID, release.ID)
}
res, err := s.Get(path, gotFormation)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotFormation, DeepEquals, out)
res, err = s.Get(path+"fail", gotFormation)
c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail"))
}
}
func (s *S) createTestFormation(c *C, formation *ct.Formation) *ct.Formation {
path := formationPath(formation.AppID, formation.ReleaseID)
formation.AppID = ""
formation.ReleaseID = ""
out := &ct.Formation{}
res, err := s.Put(path, formation, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func formationPath(appID, releaseID string) string {
return "/apps/" + appID + "/formations/" + releaseID
}
func (s *S) TestDeleteFormation(c *C) {
for i, useName := range []bool{false, true} {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)})
out := | c.Assert(gotRelease, DeepEquals, out)
| random_line_split |
controller_test.go | c.Assert(err, IsNil)
c.Assert(gotApp, DeepEquals, app)
res, err = s.Get("/apps/fail"+app.ID, gotApp)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestUpdateApp(c *C) {
meta := map[string]string{"foo": "bar"}
app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta})
c.Assert(app.Protected, Equals, false)
c.Assert(app.Meta, DeepEquals, meta)
gotApp := &ct.App{}
res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, true)
c.Assert(gotApp.Meta, DeepEquals, meta)
meta = map[string]string{"foo": "baz", "bar": "foo"}
res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, false)
c.Assert(gotApp.Meta, DeepEquals, meta)
res, err = s.Get("/apps/"+app.ID, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, false)
c.Assert(gotApp.Meta, DeepEquals, meta)
}
func (s *S) TestDeleteApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "delete-app"})
path := "/apps/" + app.ID
res, err := s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
res, err = s.Get(path, app)
c.Assert(res.StatusCode, Equals, 404)
}
func (s *S) TestRecreateApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "recreate-app"})
// Post a duplicate
res, err := s.Post("/apps", app, &ct.App{})
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error
// Delete the original
path := "/apps/" + app.ID
res, err = s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
// Create the same key
app = s.createTestApp(c, &ct.App{Name: "recreate-app"})
c.Assert(app.Name, Equals, "recreate-app")
}
func (s *S) TestProtectedApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true})
release := s.createTestRelease(c, &ct.Release{
Processes: map[string]ct.ProcessType{"web": {}, "worker": {}},
})
path := formationPath(app.ID, release.ID)
for _, t := range []struct {
procs map[string]int
status int
}{
{nil, 400},
{map[string]int{"web": 1}, 400},
{map[string]int{"worker": 1, "web": 0}, 400},
{map[string]int{"worker": 1, "web": 1}, 200},
} {
res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil)
c.Assert(err, IsNil)
res.Body.Close()
c.Assert(res.StatusCode, Equals, t.status)
}
}
func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact {
out := &ct.Artifact{}
res, err := s.Post("/artifacts", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) TestCreateArtifact(c *C) {
for i, id := range []string{"", utils.UUID()} {
in := &ct.Artifact{
ID: id,
Type: "docker-image",
URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i),
}
out := s.createTestArtifact(c, in)
c.Assert(out.Type, Equals, in.Type)
c.Assert(out.URI, Equals, in.URI)
c.Assert(out.ID, Not(Equals), "")
if id != "" {
c.Assert(out.ID, Equals, id)
}
gotArtifact := &ct.Artifact{}
res, err := s.Get("/artifacts/"+out.ID, gotArtifact)
c.Assert(err, IsNil)
c.Assert(gotArtifact, DeepEquals, out)
res, err = s.Get("/artifacts/fail"+out.ID, gotArtifact)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release {
if in.ArtifactID == "" {
in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID
}
out := &ct.Release{}
res, err := s.Post("/releases", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key {
out := &ct.Key{}
res, err := s.Post("/keys", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) TestCreateRelease(c *C) {
for _, id := range []string{"", utils.UUID()} {
in := &ct.Release{ID: id}
out := s.createTestRelease(c, in)
c.Assert(out.ArtifactID, Equals, in.ArtifactID)
if id != "" {
c.Assert(out.ID, Equals, id)
}
gotRelease := &ct.Release{}
res, err := s.Get("/releases/"+out.ID, gotRelease)
c.Assert(err, IsNil)
c.Assert(gotRelease, DeepEquals, out)
res, err = s.Get("/releases/fail"+out.ID, gotRelease)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestCreateFormation(c *C) {
for i, useName := range []bool{false, true} {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)})
in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}}
if useName {
in.AppID = app.Name
}
out := s.createTestFormation(c, in)
c.Assert(out.AppID, Equals, app.ID)
c.Assert(out.ReleaseID, Equals, release.ID)
c.Assert(out.Processes["web"], Equals, 1)
gotFormation := &ct.Formation{}
var path string
if useName {
path = formationPath(app.Name, release.ID)
} else {
path = formationPath(app.ID, release.ID)
}
res, err := s.Get(path, gotFormation)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotFormation, DeepEquals, out)
res, err = s.Get(path+"fail", gotFormation)
c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail"))
}
}
func (s *S) createTestFormation(c *C, formation *ct.Formation) *ct.Formation {
path := formationPath(formation.AppID, formation.ReleaseID)
formation.AppID = ""
formation.ReleaseID = ""
out := &ct.Formation{}
res, err := s.Put(path, formation, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func formationPath(appID, releaseID string) string {
return "/apps/" + appID + "/formations/" + releaseID
}
func (s *S) TestDeleteFormation(c *C) {
for i, useName := range []bool{false, true} | {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)})
out := s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID})
var path string
if useName {
path = formationPath(app.Name, release.ID)
} else {
path = formationPath(app.ID, release.ID)
}
res, err := s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
res, err = s.Get(path, out)
c.Assert(res.StatusCode, Equals, 404)
} | conditional_block |
|
controller_test.go | ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}}
if useName {
in.AppID = app.Name
}
out := s.createTestFormation(c, in)
c.Assert(out.AppID, Equals, app.ID)
c.Assert(out.ReleaseID, Equals, release.ID)
c.Assert(out.Processes["web"], Equals, 1)
gotFormation := &ct.Formation{}
var path string
if useName {
path = formationPath(app.Name, release.ID)
} else {
path = formationPath(app.ID, release.ID)
}
res, err := s.Get(path, gotFormation)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotFormation, DeepEquals, out)
res, err = s.Get(path+"fail", gotFormation)
c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail"))
}
}
func (s *S) createTestFormation(c *C, formation *ct.Formation) *ct.Formation {
path := formationPath(formation.AppID, formation.ReleaseID)
formation.AppID = ""
formation.ReleaseID = ""
out := &ct.Formation{}
res, err := s.Put(path, formation, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func formationPath(appID, releaseID string) string {
return "/apps/" + appID + "/formations/" + releaseID
}
func (s *S) TestDeleteFormation(c *C) {
for i, useName := range []bool{false, true} {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)})
out := s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID})
var path string
if useName {
path = formationPath(app.Name, release.ID)
} else {
path = formationPath(app.ID, release.ID)
}
res, err := s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
res, err = s.Get(path, out)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestCreateKey(c *C) {
in := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5r1JfsAYIFi86KBa7C5nqKo+BLMJk29+5GsjelgBnCmn4J/QxOrVtovNcntoRLUCRwoHEMHzs3Tc6+PdswIxpX1l3YC78kgdJe6LVb962xUgP6xuxauBNRO7tnh9aPGyLbjl9j7qZAcn2/ansG1GBVoX1GSB58iBsVDH18DdVzlGwrR4OeNLmRQj8kuJEuKOoKEkW55CektcXjV08K3QSQID7aRNHgDpGGgp6XDi0GhIMsuDUGHAdPGZnqYZlxuUFaCW2hK6i1UkwnQCCEv/9IUFl2/aqVep2iX/ynrIaIsNKm16o0ooZ1gCHJEuUKRPUXhZUXqkRXqqHd3a4CUhH [email protected]"}
out := s.createTestKey(c, in)
c.Assert(out.ID, Equals, "7ab054ff4a2009fadc67e1f8b380dbee")
c.Assert(out.Key, Equals, in.Key[:strings.LastIndex(in.Key, " ")])
c.Assert(out.Comment, Equals, "[email protected]")
gotKey := &ct.Key{}
path := "/keys/" + out.ID
res, err := s.Get(path, gotKey)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotKey, DeepEquals, out)
res, err = s.Get(path+"fail", gotKey)
c.Assert(res.StatusCode, Equals, 404)
}
func (s *S) TestDeleteKey(c *C) {
key := s.createTestKey(c, &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJv/RsyRxiSAh7cU236LOCZ3vD9PO87Fi32QbojQxuGDotmk65fN6WUuL7DQjzUnWkFRu4w/svmb+9MuYK0L2b4Kc1rKXBYaytzWqGtv2VaAFObth40AlNr0V26hcTcBNQQPa23Z8LwQNgELn2b/o2CK+Pie1UbE5lHg8R+pm03cI7fYPB0jA6LIS+IVKHslVhjzxtN49xm9W0DiCxouHZEl+Fd5asgtg10HN7CV5l2+ZFyrPAkxkQrzWpkUMgfvU+xFamyczzBKMT0fTYo+TUM3w3w3njJvqXdHjo3anrUF65rSFxfeNkXoe/NQDdvWu+XBfEypWv25hlQv91JI0N"})
path := "/keys/" + key.ID
res, err := s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
res, err = s.Get(path, key)
c.Assert(res.StatusCode, Equals, 404)
}
func (s *S) TestRecreateKey(c *C) {
key := &ct.Key{Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3I4gHed4RioRMoJTFdVYp9S6QhHUtMe2cdQAmaN5lVuAaEe9GmJ/wtD4pd7sCpw9daCVOD/WWKCDunrwiEwMNzZKPFQPRfrGAgpCdweD+mk62n/DuaeKJFcfB4C/iLqUrYQ9q0QNnokchI4Ts/CaWoesJOQsbtxDwxcaOlYA/Yq/nY/RA3aK0ZfZqngrOjNRuvhnNFeCF94w2CwwX9ley+PtL0LSWOK2F9D/VEAoRMY89av6WQEoho3vLH7PIOP4OKdla7ezxP9nU14MN4PSv2yUS15mZ14SkA3EF+xmO0QXYUcUi4v5UxkBpoRYNAh32KMMD70pXPRCmWvZ5pRrH [email protected]"}
originalKey := s.createTestKey(c, key)
c.Assert(originalKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559")
c.Assert(originalKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")])
c.Assert(originalKey.Comment, Equals, "[email protected]")
// Post a duplicate
res, err := s.Post("/keys", key, &ct.Key{})
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error
// Delete the original
path := "/keys/" + originalKey.ID
res, err = s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
// Create the same key
newKey := s.createTestKey(c, key)
c.Assert(newKey.ID, Equals, "0c0432006c63fc965ef6946fb67ab559")
c.Assert(newKey.Key, Equals, key.Key[:strings.LastIndex(key.Key, " ")])
c.Assert(newKey.Comment, Equals, "[email protected]")
}
func (s *S) TestAppList(c *C) | {
s.createTestApp(c, &ct.App{Name: "list-test"})
var list []ct.App
res, err := s.Get("/apps", &list)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(len(list) > 0, Equals, true)
c.Assert(list[0].ID, Not(Equals), "")
} | identifier_body |
|
controller_test.go | Equals, name)
c.Assert(app.ID, Not(Equals), "")
if id != "" {
c.Assert(app.ID, Equals, id)
}
c.Assert(app.Protected, Equals, true)
c.Assert(app.Meta["foo"], Equals, "bar")
gotApp := &ct.App{}
res, err := s.Get("/apps/"+app.ID, gotApp)
c.Assert(err, IsNil)
c.Assert(gotApp, DeepEquals, app)
res, err = s.Get("/apps/"+app.Name, gotApp)
c.Assert(err, IsNil)
c.Assert(gotApp, DeepEquals, app)
res, err = s.Get("/apps/fail"+app.ID, gotApp)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestUpdateApp(c *C) {
meta := map[string]string{"foo": "bar"}
app := s.createTestApp(c, &ct.App{Name: "update-app", Meta: meta})
c.Assert(app.Protected, Equals, false)
c.Assert(app.Meta, DeepEquals, meta)
gotApp := &ct.App{}
res, err := s.Post("/apps/"+app.Name, map[string]bool{"protected": true}, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, true)
c.Assert(gotApp.Meta, DeepEquals, meta)
meta = map[string]string{"foo": "baz", "bar": "foo"}
res, err = s.Post("/apps/"+app.ID, map[string]interface{}{"protected": false, "meta": meta}, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, false)
c.Assert(gotApp.Meta, DeepEquals, meta)
res, err = s.Get("/apps/"+app.ID, gotApp)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotApp.Protected, Equals, false)
c.Assert(gotApp.Meta, DeepEquals, meta)
}
func (s *S) TestDeleteApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "delete-app"})
path := "/apps/" + app.ID
res, err := s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
res, err = s.Get(path, app)
c.Assert(res.StatusCode, Equals, 404)
}
func (s *S) TestRecreateApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "recreate-app"})
// Post a duplicate
res, err := s.Post("/apps", app, &ct.App{})
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 500) // TODO: This should probably be a 4xx error
// Delete the original
path := "/apps/" + app.ID
res, err = s.Delete(path)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
	// Create the same app
app = s.createTestApp(c, &ct.App{Name: "recreate-app"})
c.Assert(app.Name, Equals, "recreate-app")
}
func (s *S) TestProtectedApp(c *C) {
app := s.createTestApp(c, &ct.App{Name: "protected-app", Protected: true})
release := s.createTestRelease(c, &ct.Release{
Processes: map[string]ct.ProcessType{"web": {}, "worker": {}},
})
path := formationPath(app.ID, release.ID)
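	// A protected app must keep every process type in the release scaled up;
	// formations that omit or zero out a process type are expected to be rejected with a 400.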
for _, t := range []struct {
procs map[string]int
status int
}{
{nil, 400},
{map[string]int{"web": 1}, 400},
{map[string]int{"worker": 1, "web": 0}, 400},
{map[string]int{"worker": 1, "web": 1}, 200},
} {
res, err := s.Put(path, &ct.Formation{Processes: t.procs}, nil)
c.Assert(err, IsNil)
res.Body.Close()
c.Assert(res.StatusCode, Equals, t.status)
}
}
func (s *S) createTestArtifact(c *C, in *ct.Artifact) *ct.Artifact {
out := &ct.Artifact{}
res, err := s.Post("/artifacts", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) TestCreateArtifact(c *C) {
for i, id := range []string{"", utils.UUID()} {
in := &ct.Artifact{
ID: id,
Type: "docker-image",
URI: fmt.Sprintf("docker://flynn/host?id=adsf%d", i),
}
out := s.createTestArtifact(c, in)
c.Assert(out.Type, Equals, in.Type)
c.Assert(out.URI, Equals, in.URI)
c.Assert(out.ID, Not(Equals), "")
if id != "" {
c.Assert(out.ID, Equals, id)
}
gotArtifact := &ct.Artifact{}
res, err := s.Get("/artifacts/"+out.ID, gotArtifact)
c.Assert(err, IsNil)
c.Assert(gotArtifact, DeepEquals, out)
res, err = s.Get("/artifacts/fail"+out.ID, gotArtifact)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release {
if in.ArtifactID == "" {
in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID
}
out := &ct.Release{}
res, err := s.Post("/releases", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) createTestKey(c *C, in *ct.Key) *ct.Key {
out := &ct.Key{}
res, err := s.Post("/keys", in, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func (s *S) TestCreateRelease(c *C) {
for _, id := range []string{"", utils.UUID()} {
in := &ct.Release{ID: id}
out := s.createTestRelease(c, in)
c.Assert(out.ArtifactID, Equals, in.ArtifactID)
if id != "" {
c.Assert(out.ID, Equals, id)
}
gotRelease := &ct.Release{}
res, err := s.Get("/releases/"+out.ID, gotRelease)
c.Assert(err, IsNil)
c.Assert(gotRelease, DeepEquals, out)
res, err = s.Get("/releases/fail"+out.ID, gotRelease)
c.Assert(res.StatusCode, Equals, 404)
}
}
func (s *S) TestCreateFormation(c *C) {
for i, useName := range []bool{false, true} {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("create-formation-%d", i)})
in := &ct.Formation{ReleaseID: release.ID, AppID: app.ID, Processes: map[string]int{"web": 1}}
if useName {
in.AppID = app.Name
}
out := s.createTestFormation(c, in)
c.Assert(out.AppID, Equals, app.ID)
c.Assert(out.ReleaseID, Equals, release.ID)
c.Assert(out.Processes["web"], Equals, 1)
gotFormation := &ct.Formation{}
var path string
if useName {
path = formationPath(app.Name, release.ID)
} else {
path = formationPath(app.ID, release.ID)
}
res, err := s.Get(path, gotFormation)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
c.Assert(gotFormation, DeepEquals, out)
res, err = s.Get(path+"fail", gotFormation)
c.Assert(res.StatusCode, Equals, 404, Commentf("path:%s formation:", path+"fail"))
}
}
func (s *S) createTestFormation(c *C, formation *ct.Formation) *ct.Formation {
path := formationPath(formation.AppID, formation.ReleaseID)
formation.AppID = ""
formation.ReleaseID = ""
out := &ct.Formation{}
res, err := s.Put(path, formation, out)
c.Assert(err, IsNil)
c.Assert(res.StatusCode, Equals, 200)
return out
}
func | (appID, releaseID string) string {
return "/apps/" + appID + "/formations/" + releaseID
}
func (s *S) TestDeleteFormation(c *C) {
for i, useName := range []bool{false, true} {
release := s.createTestRelease(c, &ct.Release{})
app := s.createTestApp(c, &ct.App{Name: fmt.Sprintf("delete-formation-%d", i)})
out | formationPath | identifier_name |
colores-empresa.component.ts | '@angular/material/core';
import { ProgressSpinnerMode } from '@angular/material/progress-spinner';
import { MatRadioChange } from '@angular/material/radio';
@Component({
selector: 'app-colores-empresa',
templateUrl: './colores-empresa.component.html',
styleUrls: ['./colores-empresa.component.css']
}) | export class ColoresEmpresaComponent implements OnInit {
selec1 = false;
selec2 = false;
selec3 = false;
selec4 = false;
ingresarOtro = false;
verOtro: any;
fraseReporte = new FormControl('');
nuevaF = new FormControl('');
public fraseForm = new FormGroup({
fraseReporteF: this.fraseReporte,
nuevaForm: this.nuevaF
});
principal = new FormControl('');
secundario = new FormControl('');
public coloresForm = new FormGroup({
color_p: this.principal,
color_s: this.secundario
});
idEmpleado: number;
empleado: any = [];
p_color: any;
s_color: any;
frase: any;
verColores: boolean = false;
verFrase: boolean = false;
/**
* Variables progress spinner
*/
color: ThemePalette = 'primary';
mode: ProgressSpinnerMode = 'indeterminate';
value = 10;
habilitarprogress: boolean = false;
constructor(
private rest: EmpresaService,
public restE: EmpleadoService,
private toastr: ToastrService,
public router: Router,
public location: Location,
public dialogRef: MatDialogRef<ColoresEmpresaComponent>,
@Inject(MAT_DIALOG_DATA) public data: any
) {
this.idEmpleado = parseInt(localStorage.getItem('empleado'));
}
ngOnInit(): void {
this.VerFormularios();
this.obtenerColores();
this.ObtenerEmpleados(this.idEmpleado);
this.ObtenerLogo();
}
VerFormularios() {
if (this.data.ventana === 'colores') {
this.verColores = true;
} else {
this.verFrase = true;
this.ImprimirFrase();
}
}
  // Method to fetch the employee's information
ObtenerEmpleados(idemploy: any) {
this.empleado = [];
this.restE.getOneEmpleadoRest(idemploy).subscribe(data => {
this.empleado = data;
})
}
  // Method to fetch the company logo
logo: any = String;
ObtenerLogo() {
this.rest.LogoEmpresaImagenBase64(localStorage.getItem('empresa')).subscribe(res => {
this.logo = 'data:image/jpeg;base64,' + res.imagen;
});
}
CambiarColores() {
this.habilitarprogress = true;
let datos = {
color_p: this.p_color,
color_s: this.s_color,
id: this.data.datos.id
}
this.rest.ActualizarColores(datos).subscribe(data => {
this.toastr.success('Nuevos colores registrados exitosamente', '', {
timeOut: 6000,
});
this.obtenerColores();
this.dialogRef.close({ actualizar: true });
this.habilitarprogress = false;
})
}
empresas: any = [];
obtenerColores() {
this.empresas = [];
this.rest.ConsultarDatosEmpresa(this.data.datos.id).subscribe(res => {
this.empresas = res;
this.p_color = this.empresas[0].color_p;
this.s_color = this.empresas[0].color_s;
this.frase = this.empresas[0].marca_agua;
});
}
ImprimirFrase() {
if (this.data.datos.marca_agua === 'FullTime') {
this.selec1 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Fulltime' });
}
else if (this.data.datos.marca_agua === 'Confidencial') {
this.selec2 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Confidencial' });
}
else if (this.data.datos.marca_agua === '') {
this.selec3 = true;
this.fraseForm.patchValue({ fraseReporteF: '' });
}
else {
this.selec4 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Otro' });
}
}
IngresarFrase() {
this.verOtro = { 'visibility': 'visible' }; this.ingresarOtro = true;
}
CambiarFrase(ob: MatRadioChange) {
this.verOtro = { 'visibility': 'hidden' }; this.ingresarOtro = false;
this.fraseForm.patchValue({
nuevaForm: ''
})
this.frase = ob.value
}
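  // Previews the report as a PDF so the user can check the selected watermark phrase before saving it.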
VerArchivo(form) {
if (form.fraseReporteF === 'Otro') {
if (form.nuevaForm != '') {
this.frase = form.nuevaForm;
this.generarPdf('open');
}
else {
this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', {
timeOut: 6000,
});
}
}
else {
this.generarPdf('open');
}
}
ActualizarFrase() {
this.habilitarprogress = true;
let datos = {
marca_agua: this.frase,
id: this.data.datos.id
}
this.rest.ActualizarMarcaAgua(datos).subscribe(data => {
this.toastr.success('Nueva frase registrada exitosamente', '', {
timeOut: 6000,
});
this.obtenerColores();
this.dialogRef.close({ actualizar: true });
this.habilitarprogress = false;
})
}
GuardarFrase(form) {
if (form.fraseReporteF === 'Otro') {
if (form.nuevaForm != '') {
this.frase = form.nuevaForm;
this.ActualizarFrase();
}
else {
this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', {
timeOut: 6000,
});
}
}
else {
this.ActualizarFrase();
}
}
/******************************************************************************************************
   * METHOD TO EXPORT TO PDF
******************************************************************************************************/
generarPdf(action = 'open') {
const documentDefinition = this.getDocumentDefinicion();
switch (action) {
case 'open': pdfMake.createPdf(documentDefinition).open(); break;
case 'print': pdfMake.createPdf(documentDefinition).print(); break;
case 'download': pdfMake.createPdf(documentDefinition).download(); break;
default: pdfMake.createPdf(documentDefinition).open(); break;
}
}
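  // Builds the pdfMake document definition: landscape page, watermark phrase, header with the printing
  // employee, footer with date/time and page numbers, and a sample table styled with the company colors.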
getDocumentDefinicion() {
sessionStorage.setItem('Empresas', this.empresas);
return {
      // Page header
pageOrientation: 'landscape',
watermark: { text: this.frase, color: 'blue', opacity: 0.1, bold: true, italics: false },
header: { text: 'Impreso por: ' + this.empleado[0].nombre + ' ' + this.empleado[0].apellido, margin: 5, fontSize: 9, opacity: 0.3, alignment: 'right' },
      // Page footer
footer: function (currentPage: any, pageCount: any, fecha: any, hora: any) {
var h = new Date();
var f = moment();
fecha = f.format('YYYY-MM-DD');
        // Current time format
if (h.getMinutes() < 10) {
var time = h.getHours() + ':0' + h.getMinutes();
}
else {
var time = h.getHours() + ':' + h.getMinutes();
}
return {
margin: 10,
columns: [
'Fecha: ' + fecha + ' Hora: ' + time, ,
{
text: [
{
text: '© Pag ' + currentPage.toString() + ' of ' + pageCount,
alignment: 'right', color: 'blue', opacity: 0.5
}
],
}
], fontSize: 10, color: '#A4B8FF',
}
},
content: [
{ image: this.logo, width: 150, margin: [10, -25, 0, 5] },
{ text: 'Prueba de Colores', bold: true, fontSize: 20, alignment: 'center', margin: [0, -30, 0, 10] },
this.presentarDataPDFEmpresas(),
],
styles: {
tableHeader: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.p_color },
tableHeaderS: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.s_color },
itemsTable: { fontSize: 10 },
itemsTableC: { fontSize: 10, alignment: 'center' },
}
};
}
presentarDataPDFEmpresas() {
return {
columns: [
{ width: | random_line_split |
|
colores-empresa.component.ts | angular/material/core';
import { ProgressSpinnerMode } from '@angular/material/progress-spinner';
import { MatRadioChange } from '@angular/material/radio';
@Component({
selector: 'app-colores-empresa',
templateUrl: './colores-empresa.component.html',
styleUrls: ['./colores-empresa.component.css']
})
export class ColoresEmpresaComponent implements OnInit {
selec1 = false;
selec2 = false;
selec3 = false;
selec4 = false;
ingresarOtro = false;
verOtro: any;
fraseReporte = new FormControl('');
nuevaF = new FormControl('');
public fraseForm = new FormGroup({
fraseReporteF: this.fraseReporte,
nuevaForm: this.nuevaF
});
principal = new FormControl('');
secundario = new FormControl('');
public coloresForm = new FormGroup({
color_p: this.principal,
color_s: this.secundario
});
idEmpleado: number;
empleado: any = [];
p_color: any;
s_color: any;
frase: any;
verColores: boolean = false;
verFrase: boolean = false;
/**
* Variables progress spinner
*/
color: ThemePalette = 'primary';
mode: ProgressSpinnerMode = 'indeterminate';
value = 10;
habilitarprogress: boolean = false;
constructor(
private rest: EmpresaService,
public restE: EmpleadoService,
private toastr: ToastrService,
public router: Router,
public location: Location,
public dialogRef: MatDialogRef<ColoresEmpresaComponent>,
@Inject(MAT_DIALOG_DATA) public data: any
) |
ngOnInit(): void {
this.VerFormularios();
this.obtenerColores();
this.ObtenerEmpleados(this.idEmpleado);
this.ObtenerLogo();
}
VerFormularios() {
if (this.data.ventana === 'colores') {
this.verColores = true;
} else {
this.verFrase = true;
this.ImprimirFrase();
}
}
  // Method to fetch the employee's information
ObtenerEmpleados(idemploy: any) {
this.empleado = [];
this.restE.getOneEmpleadoRest(idemploy).subscribe(data => {
this.empleado = data;
})
}
  // Method to fetch the company logo
logo: any = String;
ObtenerLogo() {
this.rest.LogoEmpresaImagenBase64(localStorage.getItem('empresa')).subscribe(res => {
this.logo = 'data:image/jpeg;base64,' + res.imagen;
});
}
CambiarColores() {
this.habilitarprogress = true;
let datos = {
color_p: this.p_color,
color_s: this.s_color,
id: this.data.datos.id
}
this.rest.ActualizarColores(datos).subscribe(data => {
this.toastr.success('Nuevos colores registrados exitosamente', '', {
timeOut: 6000,
});
this.obtenerColores();
this.dialogRef.close({ actualizar: true });
this.habilitarprogress = false;
})
}
empresas: any = [];
obtenerColores() {
this.empresas = [];
this.rest.ConsultarDatosEmpresa(this.data.datos.id).subscribe(res => {
this.empresas = res;
this.p_color = this.empresas[0].color_p;
this.s_color = this.empresas[0].color_s;
this.frase = this.empresas[0].marca_agua;
});
}
ImprimirFrase() {
if (this.data.datos.marca_agua === 'FullTime') {
this.selec1 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Fulltime' });
}
else if (this.data.datos.marca_agua === 'Confidencial') {
this.selec2 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Confidencial' });
}
else if (this.data.datos.marca_agua === '') {
this.selec3 = true;
this.fraseForm.patchValue({ fraseReporteF: '' });
}
else {
this.selec4 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Otro' });
}
}
IngresarFrase() {
this.verOtro = { 'visibility': 'visible' }; this.ingresarOtro = true;
}
CambiarFrase(ob: MatRadioChange) {
this.verOtro = { 'visibility': 'hidden' }; this.ingresarOtro = false;
this.fraseForm.patchValue({
nuevaForm: ''
})
this.frase = ob.value
}
VerArchivo(form) {
if (form.fraseReporteF === 'Otro') {
if (form.nuevaForm != '') {
this.frase = form.nuevaForm;
this.generarPdf('open');
}
else {
this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', {
timeOut: 6000,
});
}
}
else {
this.generarPdf('open');
}
}
ActualizarFrase() {
this.habilitarprogress = true;
let datos = {
marca_agua: this.frase,
id: this.data.datos.id
}
this.rest.ActualizarMarcaAgua(datos).subscribe(data => {
this.toastr.success('Nueva frase registrada exitosamente', '', {
timeOut: 6000,
});
this.obtenerColores();
this.dialogRef.close({ actualizar: true });
this.habilitarprogress = false;
})
}
GuardarFrase(form) {
if (form.fraseReporteF === 'Otro') {
if (form.nuevaForm != '') {
this.frase = form.nuevaForm;
this.ActualizarFrase();
}
else {
this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', {
timeOut: 6000,
});
}
}
else {
this.ActualizarFrase();
}
}
/******************************************************************************************************
   * METHOD TO EXPORT TO PDF
******************************************************************************************************/
generarPdf(action = 'open') {
const documentDefinition = this.getDocumentDefinicion();
switch (action) {
case 'open': pdfMake.createPdf(documentDefinition).open(); break;
case 'print': pdfMake.createPdf(documentDefinition).print(); break;
case 'download': pdfMake.createPdf(documentDefinition).download(); break;
default: pdfMake.createPdf(documentDefinition).open(); break;
}
}
getDocumentDefinicion() {
sessionStorage.setItem('Empresas', this.empresas);
return {
      // Page header
pageOrientation: 'landscape',
watermark: { text: this.frase, color: 'blue', opacity: 0.1, bold: true, italics: false },
header: { text: 'Impreso por: ' + this.empleado[0].nombre + ' ' + this.empleado[0].apellido, margin: 5, fontSize: 9, opacity: 0.3, alignment: 'right' },
      // Page footer
footer: function (currentPage: any, pageCount: any, fecha: any, hora: any) {
var h = new Date();
var f = moment();
fecha = f.format('YYYY-MM-DD');
        // Current time format
if (h.getMinutes() < 10) {
var time = h.getHours() + ':0' + h.getMinutes();
}
else {
var time = h.getHours() + ':' + h.getMinutes();
}
return {
margin: 10,
columns: [
'Fecha: ' + fecha + ' Hora: ' + time, ,
{
text: [
{
text: '© Pag ' + currentPage.toString() + ' of ' + pageCount,
alignment: 'right', color: 'blue', opacity: 0.5
}
],
}
], fontSize: 10, color: '#A4B8FF',
}
},
content: [
{ image: this.logo, width: 150, margin: [10, -25, 0, 5] },
{ text: 'Prueba de Colores', bold: true, fontSize: 20, alignment: 'center', margin: [0, -30, 0, 10] },
this.presentarDataPDFEmpresas(),
],
styles: {
tableHeader: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.p_color },
tableHeaderS: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.s_color },
itemsTable: { fontSize: 10 },
itemsTableC: { fontSize: 10, alignment: 'center' },
}
};
}
presentarDataPDFEmpresas() {
return {
columns: [
{ width | {
this.idEmpleado = parseInt(localStorage.getItem('empleado'));
} | identifier_body |
colores-empresa.component.ts | this.idEmpleado = parseInt(localStorage.getItem('empleado'));
}
ngOnInit(): void {
this.VerFormularios();
this.obtenerColores();
this.ObtenerEmpleados(this.idEmpleado);
this.ObtenerLogo();
}
VerFormularios() {
if (this.data.ventana === 'colores') {
this.verColores = true;
} else {
this.verFrase = true;
this.ImprimirFrase();
}
}
  // Method to fetch the employee's information
ObtenerEmpleados(idemploy: any) {
this.empleado = [];
this.restE.getOneEmpleadoRest(idemploy).subscribe(data => {
this.empleado = data;
})
}
  // Method to fetch the company logo
logo: any = String;
ObtenerLogo() {
this.rest.LogoEmpresaImagenBase64(localStorage.getItem('empresa')).subscribe(res => {
this.logo = 'data:image/jpeg;base64,' + res.imagen;
});
}
CambiarColores() {
this.habilitarprogress = true;
let datos = {
color_p: this.p_color,
color_s: this.s_color,
id: this.data.datos.id
}
this.rest.ActualizarColores(datos).subscribe(data => {
this.toastr.success('Nuevos colores registrados exitosamente', '', {
timeOut: 6000,
});
this.obtenerColores();
this.dialogRef.close({ actualizar: true });
this.habilitarprogress = false;
})
}
empresas: any = [];
obtenerColores() {
this.empresas = [];
this.rest.ConsultarDatosEmpresa(this.data.datos.id).subscribe(res => {
this.empresas = res;
this.p_color = this.empresas[0].color_p;
this.s_color = this.empresas[0].color_s;
this.frase = this.empresas[0].marca_agua;
});
}
ImprimirFrase() {
if (this.data.datos.marca_agua === 'FullTime') {
this.selec1 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Fulltime' });
}
else if (this.data.datos.marca_agua === 'Confidencial') {
this.selec2 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Confidencial' });
}
else if (this.data.datos.marca_agua === '') {
this.selec3 = true;
this.fraseForm.patchValue({ fraseReporteF: '' });
}
else {
this.selec4 = true;
this.fraseForm.patchValue({ fraseReporteF: 'Otro' });
}
}
IngresarFrase() {
this.verOtro = { 'visibility': 'visible' }; this.ingresarOtro = true;
}
CambiarFrase(ob: MatRadioChange) {
this.verOtro = { 'visibility': 'hidden' }; this.ingresarOtro = false;
this.fraseForm.patchValue({
nuevaForm: ''
})
this.frase = ob.value
}
VerArchivo(form) {
if (form.fraseReporteF === 'Otro') {
if (form.nuevaForm != '') {
this.frase = form.nuevaForm;
this.generarPdf('open');
}
else {
this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', {
timeOut: 6000,
});
}
}
else {
this.generarPdf('open');
}
}
ActualizarFrase() {
this.habilitarprogress = true;
let datos = {
marca_agua: this.frase,
id: this.data.datos.id
}
this.rest.ActualizarMarcaAgua(datos).subscribe(data => {
this.toastr.success('Nueva frase registrada exitosamente', '', {
timeOut: 6000,
});
this.obtenerColores();
this.dialogRef.close({ actualizar: true });
this.habilitarprogress = false;
})
}
GuardarFrase(form) {
if (form.fraseReporteF === 'Otro') {
if (form.nuevaForm != '') {
this.frase = form.nuevaForm;
this.ActualizarFrase();
}
else {
this.toastr.info('Por favor ingrese una frase o seleccione otra de las opciones listadas.', '', {
timeOut: 6000,
});
}
}
else {
this.ActualizarFrase();
}
}
/******************************************************************************************************
   * METHOD TO EXPORT TO PDF
******************************************************************************************************/
generarPdf(action = 'open') {
const documentDefinition = this.getDocumentDefinicion();
switch (action) {
case 'open': pdfMake.createPdf(documentDefinition).open(); break;
case 'print': pdfMake.createPdf(documentDefinition).print(); break;
case 'download': pdfMake.createPdf(documentDefinition).download(); break;
default: pdfMake.createPdf(documentDefinition).open(); break;
}
}
getDocumentDefinicion() {
sessionStorage.setItem('Empresas', this.empresas);
return {
      // Page header
pageOrientation: 'landscape',
watermark: { text: this.frase, color: 'blue', opacity: 0.1, bold: true, italics: false },
header: { text: 'Impreso por: ' + this.empleado[0].nombre + ' ' + this.empleado[0].apellido, margin: 5, fontSize: 9, opacity: 0.3, alignment: 'right' },
      // Page footer
footer: function (currentPage: any, pageCount: any, fecha: any, hora: any) {
var h = new Date();
var f = moment();
fecha = f.format('YYYY-MM-DD');
        // Current time format
if (h.getMinutes() < 10) {
var time = h.getHours() + ':0' + h.getMinutes();
}
else {
var time = h.getHours() + ':' + h.getMinutes();
}
return {
margin: 10,
columns: [
'Fecha: ' + fecha + ' Hora: ' + time, ,
{
text: [
{
text: '© Pag ' + currentPage.toString() + ' of ' + pageCount,
alignment: 'right', color: 'blue', opacity: 0.5
}
],
}
], fontSize: 10, color: '#A4B8FF',
}
},
content: [
{ image: this.logo, width: 150, margin: [10, -25, 0, 5] },
{ text: 'Prueba de Colores', bold: true, fontSize: 20, alignment: 'center', margin: [0, -30, 0, 10] },
this.presentarDataPDFEmpresas(),
],
styles: {
tableHeader: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.p_color },
tableHeaderS: { fontSize: 12, bold: true, alignment: 'center', fillColor: this.s_color },
itemsTable: { fontSize: 10 },
itemsTableC: { fontSize: 10, alignment: 'center' },
}
};
}
presentarDataPDFEmpresas() {
return {
columns: [
{ width: '*', text: '' },
{
width: 'auto',
table: {
widths: [30, 'auto', 'auto', '*', '*', 'auto', 'auto', '*', '*'],
body: [
[
{ text: 'Id', style: 'tableHeader' },
{ text: 'Nombre', style: 'tableHeader' },
{ text: 'RUC', style: 'tableHeader' },
{ text: 'Dirección', style: 'tableHeader' },
{ text: 'Teléfono', style: 'tableHeader' },
{ text: 'Correo', style: 'tableHeader' },
{ text: 'Tipo de Empresa', style: 'tableHeader' },
{ text: 'Representante', style: 'tableHeader' },
{ text: 'Resumen', style: 'tableHeaderS' }
],
...this.empresas.map(obj => {
return [
{ text: obj.id, style: 'itemsTableC' },
{ text: obj.nombre, style: 'itemsTable' },
{ text: obj.ruc, style: 'itemsTableC' },
{ text: obj.direccion, style: 'itemsTable' },
{ text: obj.telefono, style: 'itemsTableC' },
{ text: obj.correo, style: 'itemsTable' },
{ text: obj.tipo_empresa, style: 'itemsTable' },
{ text: obj.representante, style: 'itemsTable' },
{ text: 'Generalidades', style: 'itemsTable' },
];
})
]
}
},
{ width: '*', text: '' },
]
};
}
cerrarVen | tana() {
| identifier_name |
|
SuggestionManagerFRS.py |
def makeFriendshipScore(self,email,friend):
""" score friendship """
# * the total score will be out of 100 %
        # * weight for chat frequency is 60%
# * weight for duration from last chat 40%
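        # note: the value returned below is (chat/vote score + duration score) / 100, i.e. a strength between 0 and 1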
friendMgr =FriendshipManagerFRS()
array = friendMgr.selectRelationship(email,friend)
c=0
try:
if len(array)==5:
c+=1
#print "\n Id:"+str(c)
#print"User: %s"% str(i['email'])
#print "Dur:%s days from last chat" % str(i['duration'])
#print "Chats: %s chats"% str(i['chats'])
total=-1
chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3])
durationScore=self.scoreDur(array[1])
total =self.calculateTotalScore(chatVoteScore,durationScore)
#deduct / add by votes
#print "chat score %s"% str(chatScore)
#print "duration score %s" % str(durationScore)
#print "total score %s"%str(float(total)/float(100))
"return score"
return float(total)/100.0
except Exception, e:
print str(e.message)
def scoreChatsNVotes(self,chats,pvotes,nvotes):
"Score Chats represents the affnity score"
# 0 : 0
        # >500 : 60
# 0-25 : 10
#25-50 : 20
#50-150 :30
#150-250:40
#250-500:50
chats =int(chats)
pvotes= int(pvotes)
nvotes = int(nvotes)
if chats == 0: #no chats no marks
            if pvotes>nvotes:#if pvotes>nvotes score is +5 only ;else return 0
return 5
else:return 0
        if 500<chats: # more than 500 chats: full marks
if nvotes>pvotes :#chats ?votes-5only
return 55
else:return 60
score=0
if 0<chats and chats <= 25:
score= 10
elif 25<chats and chats<=50:
score= 20
        elif 50<chats and chats<=150:
score= 30
elif 150<chats and chats<=250:
score= 40
elif 250<chats and chats<=500:
score =50
score=self.voteHandler(score,pvotes,nvotes)
return score
#score for votes
def voteHandler(self,score,pv,nv):
pv =int(pv)
nv= int(nv)
if score>=5:
if pv> nv:
score+=5
elif pv<nv:
score-=5
return score
#score Duration
def scoreDur(self,dur):
"duration represents time decay"
dur =int(dur)
if 730 <dur: #more than 2 years
return 0
if dur ==0: # today
return 40
if 0<dur and dur<=182: #less than 6 months
return 30
elif 182<dur and 365>=dur: # 6 month - 1 year
return 20
elif 365<dur and 730>=dur:# 1 year - 2 years
return 10
#calculate cumulative score
def calculateTotalScore(self,chat,duration):
if chat!=None and duration != None:
return chat+ duration
else:
if chat ==None:
chat =0
if duration == None:
duration = 0
return duration + chat
    #sort the 8 product category values of user in descending order
def sortUserPreferences(self,user):
userMgr =UserManagerFRS()
categories = userMgr.getCategoryExp(user)
#sort list
if len(categories)!=0:
categories=sorted(categories.items(), key=operator.itemgetter(1))
return categories
#get maximum category out of given list of product categories
def getMaxCategory(self,categories):
categories = dict(categories)
maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max.
maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value
maxCat ={maxIndex:maxValue}
return maxCat
#update all the relationship strength of user
def upgradeRelationshipStrength(self,user):
fMgr = FriendshipManagerFRS()
user =str(user)
allFriends = fMgr.selectAllFriends(user)
for friend in allFriends:
score=0
try:
email = str(friend['email'])
score =self.makeFriendshipScore(user,email) #calculate friendship score
fMgr.upgradeRelationship(user,email,"strength",score)
except Exception,e:
print str(e.message)+" -2"
continue
finally:print email+"-> STR:"+str(score)
#refine selected list of users for chat
def refineChatList(self,user,catId):
categoryKey ="cat"+catId
friendMgr = FriendshipManagerFRS()
uMgr =UserManagerFRS()
        #sorted by experience with the product category
expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey)
print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends)
#sorted on relationship strength
closeFriends = friendMgr.selectAllFriends(user)
print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends)
#merge the lists
mixList=self.mixLists(closeFriends,expFriends)
        #prepare final list
finalList =[]
for item in mixList:
friend={'friend':str(item)}
finalList.append(friend)
return finalList
    #merge two user lists with same length and remove redundancy
def mixLists(self,closeFriends,expFriends):
finalList=[]
for OutItem in closeFriends:
chk =finalList.count(OutItem['email'])
if chk==0:
finalList.append(OutItem['email'])
else:continue
for InItem in expFriends:
chkIn =finalList.count(InItem['email'])
if chkIn ==0:
if OutItem!=InItem:
finalList.append(InItem['email'])
break
else:continue
return finalList
#suggest New Friends for User
def suggestNewFriends(self,email):
#get favourite product category
fMgr =FriendshipManagerFRS()
categories =self.sortUserPreferences(email)
maxCat = self.getMaxCategory(categories)
print "product category favourations of user: "+str(email)+" are :\n"+str(categories)
print "most favourite category of user is : cat-"+str(maxCat)
print '\ntherefore the friends of friends of user are suggested sorted according to this category value'
key =0
value=0
for index in maxCat:
key = index
value = maxCat[index]
break
category= "cat"+str(key)
#select the friends of friends with favouration to the same category(sorted desc) which user node does
candidates =fMgr.selectFriendsForFriendship(email,category)
#dispatch
#for user in candidates:
# print str(user['email'])+"->"+str(user[category])
return candidates
#replace existing relationship with random values
#def replaceRandomValuesInRel(self,fEmail,sEmail):
# #generating values
# fMgr = FriendshipManagerFRS()
# dates = fMgr.getRandomDate()
# chats = int(random.uniform(0,200))
# nVotes = int(random.uniform(0,200))
# pVotes = int(random.uniform(0,200))
# score= 0
# try:
# #update relationship
# fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats))
# fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0]))
# fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1]))
# fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[ | SuggestionManagerFRS =self | identifier_body |
|
SuggestionManagerFRS.py | (object):
def __init__(self):
SuggestionManagerFRS =self
def makeFriendshipScore(self,email,friend):
""" score friendship """
# * the total score will be out of 100 %
        # * weight for chat frequency is 60%
# * weight for duration from last chat 40%
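        # note: the value returned below is (chat/vote score + duration score) / 100, i.e. a strength between 0 and 1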
friendMgr =FriendshipManagerFRS()
array = friendMgr.selectRelationship(email,friend)
c=0
try:
if len(array)==5:
c+=1
#print "\n Id:"+str(c)
#print"User: %s"% str(i['email'])
#print "Dur:%s days from last chat" % str(i['duration'])
#print "Chats: %s chats"% str(i['chats'])
total=-1
chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3])
durationScore=self.scoreDur(array[1])
total =self.calculateTotalScore(chatVoteScore,durationScore)
#deduct / add by votes
#print "chat score %s"% str(chatScore)
#print "duration score %s" % str(durationScore)
#print "total score %s"%str(float(total)/float(100))
"return score"
return float(total)/100.0
except Exception, e:
print str(e.message)
def scoreChatsNVotes(self,chats,pvotes,nvotes):
"Score Chats represents the affnity score"
# 0 : 0
        # >500 : 60
# 0-25 : 10
#25-50 : 20
#50-150 :30
#150-250:40
#250-500:50
chats =int(chats)
pvotes= int(pvotes)
nvotes = int(nvotes)
if chats == 0: #no chats no marks
            if pvotes>nvotes:#if pvotes>nvotes score is +5 only ;else return 0
return 5
else:return 0
        if 500<chats: # more than 500 chats: full marks
if nvotes>pvotes :#chats ?votes-5only
return 55
else:return 60
score=0
if 0<chats and chats <= 25:
score= 10
elif 25<chats and chats<=50:
score= 20
        elif 50<chats and chats<=150:
score= 30
elif 150<chats and chats<=250:
score= 40
elif 250<chats and chats<=500:
score =50
score=self.voteHandler(score,pvotes,nvotes)
return score
#score for votes
def voteHandler(self,score,pv,nv):
pv =int(pv)
nv= int(nv)
if score>=5:
if pv> nv:
score+=5
elif pv<nv:
score-=5
return score
#score Duration
def scoreDur(self,dur):
"duration represents time decay"
dur =int(dur)
if 730 <dur: #more than 2 years
return 0
if dur ==0: # today
return 40
if 0<dur and dur<=182: #less than 6 months
return 30
elif 182<dur and 365>=dur: # 6 month - 1 year
return 20
elif 365<dur and 730>=dur:# 1 year - 2 years
return 10
#calculate cumulative score
def calculateTotalScore(self,chat,duration):
if chat!=None and duration != None:
return chat+ duration
else:
if chat ==None:
chat =0
if duration == None:
duration = 0
return duration + chat
    #sort the 8 product category values of user in descending order
def sortUserPreferences(self,user):
userMgr =UserManagerFRS()
categories = userMgr.getCategoryExp(user)
#sort list
if len(categories)!=0:
categories=sorted(categories.items(), key=operator.itemgetter(1))
return categories
#get maximum category out of given list of product categories
def getMaxCategory(self,categories):
categories = dict(categories)
maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max.
maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value
maxCat ={maxIndex:maxValue}
return maxCat
#update all the relationship strength of user
def upgradeRelationshipStrength(self,user):
fMgr = FriendshipManagerFRS()
user =str(user)
allFriends = fMgr.selectAllFriends(user)
for friend in allFriends:
score=0
try:
email = str(friend['email'])
score =self.makeFriendshipScore(user,email) #calculate friendship score
fMgr.upgradeRelationship(user,email,"strength",score)
except Exception,e:
print str(e.message)+" -2"
continue
finally:print email+"-> STR:"+str(score)
#refine selected list of users for chat
def refineChatList(self,user,catId):
categoryKey ="cat"+catId
friendMgr = FriendshipManagerFRS()
uMgr =UserManagerFRS()
        #sorted by experience with the product category
expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey)
print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends)
#sorted on relationship strength
closeFriends = friendMgr.selectAllFriends(user)
print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends)
#merge the lists
mixList=self.mixLists(closeFriends,expFriends)
        #prepare final list
finalList =[]
for item in mixList:
friend={'friend':str(item)}
finalList.append(friend)
return finalList
    #merge two user lists with same length and remove redundancy
def mixLists(self,closeFriends,expFriends):
finalList=[]
for OutItem in closeFriends:
chk =finalList.count(OutItem['email'])
if chk==0:
finalList.append(OutItem['email'])
else:continue
for InItem in expFriends:
chkIn =finalList.count(InItem['email'])
if chkIn ==0:
if OutItem!=InItem:
finalList.append(InItem['email'])
break
else:continue
return finalList
#suggest New Friends for User
def suggestNewFriends(self,email):
#get favourite product category
fMgr =FriendshipManagerFRS()
categories =self.sortUserPreferences(email)
maxCat = self.getMaxCategory(categories)
print "product category favourations of user: "+str(email)+" are :\n"+str(categories)
print "most favourite category of user is : cat-"+str(maxCat)
print '\ntherefore the friends of friends of user are suggested sorted according to this category value'
key =0
value=0
for index in maxCat:
key = index
value = maxCat[index]
break
category= "cat"+str(key)
#select the friends of friends with favouration to the same category(sorted desc) which user node does
candidates =fMgr.selectFriendsForFriendship(email,category)
#dispatch
#for user in candidates:
# print str(user['email'])+"->"+str(user[category])
return candidates
#replace existing relationship with random values
#def replaceRandomValuesInRel(self,fEmail,sEmail):
# #generating values
# fMgr = FriendshipManagerFRS()
# dates = fMgr.getRandomDate()
# chats = int(random.uniform(0,200))
# nVotes = int(random.uniform(0,200))
# pVotes = int(random.uniform(0,200))
# score= 0
# try:
# #update relationship
# fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats))
# fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0]))
# fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1]))
# f | SuggestionManagerFRS | identifier_name |
|
SuggestionManagerFRS.py | 40%
friendMgr =FriendshipManagerFRS()
array = friendMgr.selectRelationship(email,friend)
c=0
try:
if len(array)==5:
c+=1
#print "\n Id:"+str(c)
#print"User: %s"% str(i['email'])
#print "Dur:%s days from last chat" % str(i['duration'])
#print "Chats: %s chats"% str(i['chats'])
total=-1
chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3])
durationScore=self.scoreDur(array[1])
total =self.calculateTotalScore(chatVoteScore,durationScore)
#deduct / add by votes
#print "chat score %s"% str(chatScore)
#print "duration score %s" % str(durationScore)
#print "total score %s"%str(float(total)/float(100))
"return score"
return float(total)/100.0
except Exception, e:
print str(e.message)
def scoreChatsNVotes(self,chats,pvotes,nvotes):
"Score Chats represents the affnity score"
# 0 : 0
        # >500 : 60
# 0-25 : 10
#25-50 : 20
#50-150 :30
#150-250:40
#250-500:50
chats =int(chats)
pvotes= int(pvotes)
nvotes = int(nvotes)
if chats == 0: #no chats no marks
            if pvotes>nvotes:#if pvotes>nvotes score is +5 only ;else return 0
return 5
else:return 0
        if 500<chats: # more than 500 chats: full marks
if nvotes>pvotes :#chats ?votes-5only
return 55
else:return 60
score=0
if 0<chats and chats <= 25:
score= 10
elif 25<chats and chats<=50:
score= 20
        elif 50<chats and chats<=150:
score= 30
elif 150<chats and chats<=250:
score= 40
elif 250<chats and chats<=500:
score =50
score=self.voteHandler(score,pvotes,nvotes)
return score
#score for votes
def voteHandler(self,score,pv,nv):
pv =int(pv)
nv= int(nv)
if score>=5:
if pv> nv:
score+=5
elif pv<nv:
|
return score
#score Duration
def scoreDur(self,dur):
"duration represents time decay"
dur =int(dur)
if 730 <dur: #more than 2 years
return 0
if dur ==0: # today
return 40
if 0<dur and dur<=182: #less than 6 months
return 30
elif 182<dur and 365>=dur: # 6 month - 1 year
return 20
elif 365<dur and 730>=dur:# 1 year - 2 years
return 10
#calculate cumulative score
def calculateTotalScore(self,chat,duration):
if chat!=None and duration != None:
return chat+ duration
else:
if chat ==None:
chat =0
if duration == None:
duration = 0
return duration + chat
    #sort the 8 product category values of user in descending order
def sortUserPreferences(self,user):
userMgr =UserManagerFRS()
categories = userMgr.getCategoryExp(user)
#sort list
if len(categories)!=0:
categories=sorted(categories.items(), key=operator.itemgetter(1))
return categories
#get maximum category out of given list of product categories
def getMaxCategory(self,categories):
categories = dict(categories)
maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max.
maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value
maxCat ={maxIndex:maxValue}
return maxCat
#update all the relationship strength of user
def upgradeRelationshipStrength(self,user):
fMgr = FriendshipManagerFRS()
user =str(user)
allFriends = fMgr.selectAllFriends(user)
for friend in allFriends:
score=0
try:
email = str(friend['email'])
score =self.makeFriendshipScore(user,email) #calculate friendship score
fMgr.upgradeRelationship(user,email,"strength",score)
except Exception,e:
print str(e.message)+" -2"
continue
finally:print email+"-> STR:"+str(score)
#refine selected list of users for chat
def refineChatList(self,user,catId):
categoryKey ="cat"+catId
friendMgr = FriendshipManagerFRS()
uMgr =UserManagerFRS()
        #sorted by experience with the product category
expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey)
print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends)
#sorted on relationship strength
closeFriends = friendMgr.selectAllFriends(user)
print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends)
#merge the lists
mixList=self.mixLists(closeFriends,expFriends)
        #prepare final list
finalList =[]
for item in mixList:
friend={'friend':str(item)}
finalList.append(friend)
return finalList
    #merge two user lists with same length and remove redundancy
def mixLists(self,closeFriends,expFriends):
finalList=[]
for OutItem in closeFriends:
chk =finalList.count(OutItem['email'])
if chk==0:
finalList.append(OutItem['email'])
else:continue
for InItem in expFriends:
chkIn =finalList.count(InItem['email'])
if chkIn ==0:
if OutItem!=InItem:
finalList.append(InItem['email'])
break
else:continue
return finalList
#suggest New Friends for User
def suggestNewFriends(self,email):
#get favourite product category
fMgr =FriendshipManagerFRS()
categories =self.sortUserPreferences(email)
maxCat = self.getMaxCategory(categories)
print "product category favourations of user: "+str(email)+" are :\n"+str(categories)
print "most favourite category of user is : cat-"+str(maxCat)
print '\ntherefore the friends of friends of user are suggested sorted according to this category value'
key =0
value=0
for index in maxCat:
key = index
value = maxCat[index]
break
category= "cat"+str(key)
#select the friends of friends with favouration to the same category(sorted desc) which user node does
candidates =fMgr.selectFriendsForFriendship(email,category)
#dispatch
#for user in candidates:
# print str(user['email'])+"->"+str(user[category])
return candidates
#replace existing relationship with random values
#def replaceRandomValuesInRel(self,fEmail,sEmail):
# #generating values
# fMgr = FriendshipManagerFRS()
# dates = fMgr.getRandomDate()
# chats = int(random.uniform(0,200))
# nVotes = int(random.uniform(0,200))
# pVotes = int(random.uniform(0,200))
# score= 0
# try:
# #update relationship
# fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats))
# fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0]))
# fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1]))
# fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[2]))
# except Exception ,e:
# print e.message
# return False
# finally:
# #calculate and save frienship score
# score = self.makeFriendshipScore(fEmail,sEmail)
# print "Strength"+str | score-=5 | conditional_block |
SuggestionManagerFRS.py | for duration from last chat 40%
friendMgr =FriendshipManagerFRS()
array = friendMgr.selectRelationship(email,friend)
c=0
try:
if len(array)==5:
c+=1
#print "\n Id:"+str(c)
#print"User: %s"% str(i['email'])
#print "Dur:%s days from last chat" % str(i['duration'])
#print "Chats: %s chats"% str(i['chats'])
total=-1
chatVoteScore=self.scoreChatsNVotes(array[0],array[2],array[3])
durationScore=self.scoreDur(array[1])
total =self.calculateTotalScore(chatVoteScore,durationScore)
#deduct / add by votes
#print "chat score %s"% str(chatScore)
#print "duration score %s" % str(durationScore)
#print "total score %s"%str(float(total)/float(100))
"return score"
return float(total)/100.0
except Exception, e:
print str(e.message)
def scoreChatsNVotes(self,chats,pvotes,nvotes):
"Score Chats represents the affnity score"
# 0 : 0
        # >500 : 60
# 0-25 : 10
#25-50 : 20
#50-150 :30
#150-250:40
#250-500:50
chats =int(chats)
pvotes= int(pvotes)
nvotes = int(nvotes)
if chats == 0: #no chats no marks
            if pvotes>nvotes:#if pvotes>nvotes score is +5 only ;else return 0
return 5
else:return 0
        if 500<chats: # more than 500 chats: full marks
if nvotes>pvotes :#chats ?votes-5only
return 55
else:return 60
score=0
if 0<chats and chats <= 25:
score= 10
elif 25<chats and chats<=50:
score= 20
        elif 50<chats and chats<=150:
score= 30
elif 150<chats and chats<=250:
score= 40
elif 250<chats and chats<=500:
score =50
score=self.voteHandler(score,pvotes,nvotes)
return score
#score for votes
def voteHandler(self,score,pv,nv):
pv =int(pv)
nv= int(nv)
if score>=5:
if pv> nv:
score+=5
elif pv<nv:
score-=5
return score
#score Duration
def scoreDur(self,dur):
"duration represents time decay"
dur =int(dur)
if 730 <dur: #more than 2 years
return 0
if dur ==0: # today
return 40
if 0<dur and dur<=182: #less than 6 months
return 30
elif 182<dur and 365>=dur: # 6 month - 1 year
return 20
elif 365<dur and 730>=dur:# 1 year - 2 years
return 10
#calculate cumulative score
def calculateTotalScore(self,chat,duration):
if chat!=None and duration != None:
return chat+ duration
else:
if chat ==None:
chat =0
if duration == None:
duration = 0
return duration + chat
    #sort the 8 product category values of user in descending order
def sortUserPreferences(self,user):
userMgr =UserManagerFRS()
categories = userMgr.getCategoryExp(user)
#sort list
if len(categories)!=0:
categories=sorted(categories.items(), key=operator.itemgetter(1))
return categories
#get maximum category out of given list of product categories
def getMaxCategory(self,categories):
categories = dict(categories)
maxIndex=max(categories.items(), key=operator.itemgetter(1))[0] # index of max.
maxValue =max(categories.items(), key=operator.itemgetter(1))[1] # max. value
|
#update all the relationship strength of user
def upgradeRelationshipStrength(self,user):
fMgr = FriendshipManagerFRS()
user =str(user)
allFriends = fMgr.selectAllFriends(user)
for friend in allFriends:
score=0
try:
email = str(friend['email'])
score =self.makeFriendshipScore(user,email) #calculate friendship score
fMgr.upgradeRelationship(user,email,"strength",score)
except Exception,e:
print str(e.message)+" -2"
continue
finally:print email+"-> STR:"+str(score)
#refine selected list of users for chat
def refineChatList(self,user,catId):
categoryKey ="cat"+catId
friendMgr = FriendshipManagerFRS()
uMgr =UserManagerFRS()
        #sorted by experience with the product category
expFriends = friendMgr.selectFriendsForChatOnExp(user,categoryKey)
print "sorted friendlist from highest experience about product category: \n \n"+str( expFriends)
#sorted on relationship strength
closeFriends = friendMgr.selectAllFriends(user)
print "\n\nsorted friendlist from highest relationship strength :\n \n"+str(closeFriends)
#merge the lists
mixList=self.mixLists(closeFriends,expFriends)
        #prepare final list
finalList =[]
for item in mixList:
friend={'friend':str(item)}
finalList.append(friend)
return finalList
    #merge two user lists with same length and remove redundancy
def mixLists(self,closeFriends,expFriends):
finalList=[]
for OutItem in closeFriends:
chk =finalList.count(OutItem['email'])
if chk==0:
finalList.append(OutItem['email'])
else:continue
for InItem in expFriends:
chkIn =finalList.count(InItem['email'])
if chkIn ==0:
if OutItem!=InItem:
finalList.append(InItem['email'])
break
else:continue
return finalList
#suggest New Friends for User
def suggestNewFriends(self,email):
#get favourite product category
fMgr =FriendshipManagerFRS()
categories =self.sortUserPreferences(email)
maxCat = self.getMaxCategory(categories)
print "product category favourations of user: "+str(email)+" are :\n"+str(categories)
print "most favourite category of user is : cat-"+str(maxCat)
print '\ntherefore the friends of friends of user are suggested sorted according to this category value'
key =0
value=0
for index in maxCat:
key = index
value = maxCat[index]
break
category= "cat"+str(key)
#select the friends of friends with favouration to the same category(sorted desc) which user node does
candidates =fMgr.selectFriendsForFriendship(email,category)
#dispatch
#for user in candidates:
# print str(user['email'])+"->"+str(user[category])
return candidates
#replace existing relationship with random values
#def replaceRandomValuesInRel(self,fEmail,sEmail):
# #generating values
# fMgr = FriendshipManagerFRS()
# dates = fMgr.getRandomDate()
# chats = int(random.uniform(0,200))
# nVotes = int(random.uniform(0,200))
# pVotes = int(random.uniform(0,200))
# score= 0
# try:
# #update relationship
# fMgr.upgradeRelationship(fEmail,sEmail,"chats",str(chats))
# fMgr.upgradeRelationship(fEmail,sEmail,"nVotes",str(nVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"pVotes",str(pVotes))
# fMgr.upgradeRelationship(fEmail,sEmail,"started",str(dates[0]))
# fMgr.upgradeRelationship(fEmail,sEmail,"triggered",str(dates[1]))
# fMgr.upgradeRelationship(fEmail,sEmail,"duration",str(dates[2]))
# except Exception ,e:
# print e.message
# return False
# finally:
# #calculate and save frienship score
# score = self.makeFriendshipScore(fEmail,sEmail)
# | maxCat ={maxIndex:maxValue}
return maxCat | random_line_split |
proxy.go | log.Info("service is deleted, cleanup service and endpoints")
p.cleanupService(request.NamespacedName)
return Result{}, nil
}
return Result{}, err
}
	// if the service is updated to an invalid service, we treat it as deleted and clean up related resources
if p.shouldSkipService(&service) {
log.Info("service has no ClusterIP, skip it", "service", service)
p.cleanupService(request.NamespacedName)
return Result{}, nil
}
changed := p.syncServiceInfoFromService(request.NamespacedName, &service)
if changed {
p.syncServiceChangesToAgentByKey(request.NamespacedName)
}
return Result{}, nil
}
// syncServiceInfoFromService only syncs ClusterIP, SessionAffinity and StickyMaxAgeSeconds as needed;
// if none of them changed, synchronization is skipped
func (p *proxy) syncServiceInfoFromService(key ObjectKey, svc *corev1.Service) bool {
p.mu.Lock()
defer p.mu.Unlock()
oldService := p.serviceMap[key]
newService := makeServiceInfo(svc)
if oldService.ClusterIP == newService.ClusterIP &&
oldService.SessionAffinity == newService.SessionAffinity &&
oldService.StickyMaxAgeSeconds == newService.StickyMaxAgeSeconds {
return false
}
oldService.ClusterIP = newService.ClusterIP
oldService.SessionAffinity = newService.SessionAffinity
oldService.StickyMaxAgeSeconds = newService.StickyMaxAgeSeconds
if oldService.EndpointMap == nil {
oldService.EndpointMap = make(map[Port]EndpointSet)
}
if oldService.EndpointToNodes == nil {
oldService.EndpointToNodes = make(map[Endpoint]NodeName)
}
p.serviceMap[key] = oldService
return true
}
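// cleanupService removes the service's endpoints from every edge node that still references them,
// pushes the updated node configuration to the agents and drops the service from the cache.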
func (p *proxy) cleanupService(serviceKey ObjectKey) {
p.mu.Lock()
defer p.mu.Unlock()
serviceInfo, exists := p.serviceMap[serviceKey]
if !exists {
return
}
	// clean up this service's endpoints on the related edge nodes
for port := range serviceInfo.EndpointMap {
spn := ServicePortName{NamespacedName: serviceKey, Port: port.Port, Protocol: port.Protocol}
for _, nodeName := range serviceInfo.EndpointToNodes {
node, exists := p.nodeSet[nodeName]
if !exists {
continue
}
delete(node.ServicePortMap, spn)
delete(node.EndpointMap, spn)
p.nodeSet[nodeName] = node
}
}
p.syncServiceChangesToAgent(serviceInfo)
delete(p.serviceMap, serviceKey)
}
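// syncServiceChangesToAgentByKey looks up the cached ServiceInfo by key and, if present,
// schedules the affected edge nodes for resynchronization.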
func (p *proxy) syncServiceChangesToAgentByKey(key ObjectKey) {
p.mu.Lock()
defer p.mu.Unlock()
serviceInfo, ok := p.serviceMap[key]
if !ok {
return
}
p.syncServiceChangesToAgent(serviceInfo)
}
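// syncServiceChangesToAgent enqueues every edge node that hosts one of the service's endpoints
// so the keeper pushes a fresh configuration to it.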
func (p *proxy) syncServiceChangesToAgent(serviceInfo ServiceInfo) {
for _, name := range serviceInfo.EndpointToNodes {
node, ok := p.nodeSet[name]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
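// OnEndpointSliceUpdate reconciles an EndpointSlice event: it resolves the owning service,
// cleans up when either object is gone or invalid, and otherwise syncs the endpoints it carries.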
func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log := p.log.WithValues("request", request)
var es EndpointSlice
err := p.client.Get(ctx, request.NamespacedName, &es)
if err != nil {
if errors.IsNotFound(err) {
log.Info("endpointslice is deleted, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
log.Error(err, "failed to get endpointslice")
return reconcile.Result{}, err
}
if es.DeletionTimestamp != nil {
log.Info("endpointslice is terminating, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
serviceName := getServiceName(es.Labels)
if serviceName == "" {
log.Info("no service name found in endpointslice", "endpointslice", es)
return Result{}, nil
}
var (
service corev1.Service
serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace}
)
if err = p.client.Get(ctx, serviceKey, &service); err != nil {
log.Error(err, "failed to get service")
// if service is not found, we don't handle this endpointslice
if errors.IsNotFound(err) {
log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey)
p.cleanupService(serviceKey)
return Result{}, nil
}
return Result{}, err
}
if p.shouldSkipService(&service) {
log.Info("service has no ClusterIP, skip it", "service", service)
p.cleanupService(serviceKey)
return Result{}, nil
}
serviceChanged := p.syncServiceInfoFromService(serviceKey, &service)
p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged)
return Result{}, nil
}
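// syncServiceEndpointsFromEndpointSlice diffs the new EndpointSlice snapshot against the cached one,
// adds and removes endpoints on the affected edge nodes, and resyncs every node that changed.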
func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) {
p.mu.Lock()
defer p.mu.Unlock()
key, serviceKey := newES.ObjectKey, newES.ServiceKey
oldES := p.endpointSliceMap[key]
isSame := reflect.DeepEqual(oldES, newES)
if isSame {
return
}
serviceInfo := p.serviceMap[serviceKey]
	// collect the nodes whose endpoints changed
changedNodeNames := stringset.New()
// add new endpoints
for port := range newES.Ports {
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range newES.Endpoints {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Add(endpoint)
serviceInfo.EndpointToNodes[endpoint] = ep.NodeName
p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{
ClusterIP: serviceInfo.ClusterIP,
Port: port.Port,
Protocol: port.Protocol,
SessionAffinity: serviceInfo.SessionAffinity,
StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds,
})
added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint)
if serviceChanged || added {
changedNodeNames.Add(ep.NodeName)
}
}
serviceInfo.EndpointMap[port] = endpointSet
}
// remove old endpoints
for port := range oldES.Ports {
_, exists := newES.Ports[port]
portRemoved := !exists
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range oldES.Endpoints {
_, exist := newES.Endpoints[ep.IP]
endpointRemoved := !exist
if portRemoved || endpointRemoved {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Remove(endpoint)
delete(serviceInfo.EndpointToNodes, endpoint)
p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint)
changedNodeNames.Add(ep.NodeName)
}
if portRemoved {
p.removeServicePortFromNode(ep.NodeName, servicePortName)
}
}
if len(endpointSet) == 0 {
delete(serviceInfo.EndpointMap, port)
} else {
serviceInfo.EndpointMap[port] = endpointSet
}
}
p.endpointSliceMap[key] = newES
p.serviceMap[serviceKey] = serviceInfo
for nodeName := range changedNodeNames {
node, ok := p.nodeSet[nodeName]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
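// cleanupEndpointsOfEndpointSlice treats a deleted EndpointSlice as an empty one so the regular
// sync path removes its endpoints, then evicts it from the cache.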
func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) {
es, ok := p.getEndpointSliceInfo(key)
if !ok {
return
}
	// no matter what caused the cleanup, we treat the current endpointslice as deleted
	// by syncing it with empty ports and endpoints
es.Ports = make(map[Port]Empty)
es.Endpoints = make(map[string]EndpointInfo)
p.syncServiceEndpointsFromEndpointSlice(es, false)
p.mu.Lock()
delete(p.endpointSliceMap, key)
p.mu.Unlock()
}
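// startCheckLoadBalanceRules periodically hands every known edge node back to the keeper
// (if not already queued) so its load-balance rules get re-checked; it returns when the context is cancelled.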
func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error {
tick := time.NewTicker(p.checkInterval)
for {
select {
case <-tick.C:
for _, node := range p.nodeSet {
p.keeper.AddNodeIfNotPresent(node)
}
case <-ctx.Done():
return nil
}
}
}
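// addEndpointToNode records the endpoint under the given service port in the node's cache entry,
// creating the node entry if needed; it reports whether the endpoint was newly added.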
func (p *proxy) addEndpointToNode(nodeName string, spn ServicePortName, endpoint Endpoint) bool | {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
endpointSet := node.EndpointMap[spn]
if endpointSet.Contains(endpoint) {
return false
}
endpointSet.Add(endpoint)
node.EndpointMap[spn] = endpointSet
p.nodeSet[nodeName] = node
return true
} | identifier_body |
|
proxy.go | := p.serviceMap[key]
if !ok {
return
}
p.syncServiceChangesToAgent(serviceInfo)
}
func (p *proxy) syncServiceChangesToAgent(serviceInfo ServiceInfo) {
for _, name := range serviceInfo.EndpointToNodes {
node, ok := p.nodeSet[name]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
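// OnEndpointSliceUpdate reconciles an EndpointSlice event: it resolves the owning service,
// cleans up when either object is gone or invalid, and otherwise syncs the endpoints it carries.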
func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log := p.log.WithValues("request", request)
var es EndpointSlice
err := p.client.Get(ctx, request.NamespacedName, &es)
if err != nil {
if errors.IsNotFound(err) {
log.Info("endpointslice is deleted, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
log.Error(err, "failed to get endpointslice")
return reconcile.Result{}, err
}
if es.DeletionTimestamp != nil {
log.Info("endpointslice is terminating, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
serviceName := getServiceName(es.Labels)
if serviceName == "" {
log.Info("no service name found in endpointslice", "endpointslice", es)
return Result{}, nil
}
var (
service corev1.Service
serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace}
)
if err = p.client.Get(ctx, serviceKey, &service); err != nil {
log.Error(err, "failed to get service")
// if service is not found, we don't handle this endpointslice
if errors.IsNotFound(err) {
log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey)
p.cleanupService(serviceKey)
return Result{}, nil
}
return Result{}, err
}
if p.shouldSkipService(&service) {
log.Info("service has no ClusterIP, skip it", "service", service)
p.cleanupService(serviceKey)
return Result{}, nil
}
serviceChanged := p.syncServiceInfoFromService(serviceKey, &service)
p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged)
return Result{}, nil
}
func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) {
p.mu.Lock()
defer p.mu.Unlock()
key, serviceKey := newES.ObjectKey, newES.ServiceKey
oldES := p.endpointSliceMap[key]
isSame := reflect.DeepEqual(oldES, newES)
if isSame {
return
}
serviceInfo := p.serviceMap[serviceKey]
// collect node which has endpoints changes
changedNodeNames := stringset.New()
// add new endpoints
for port := range newES.Ports {
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range newES.Endpoints {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Add(endpoint)
serviceInfo.EndpointToNodes[endpoint] = ep.NodeName
p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{
ClusterIP: serviceInfo.ClusterIP,
Port: port.Port,
Protocol: port.Protocol,
SessionAffinity: serviceInfo.SessionAffinity,
StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds,
})
added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint)
if serviceChanged || added {
changedNodeNames.Add(ep.NodeName)
}
}
serviceInfo.EndpointMap[port] = endpointSet
}
// remove old endpoints
for port := range oldES.Ports {
_, exists := newES.Ports[port]
portRemoved := !exists
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range oldES.Endpoints {
_, exist := newES.Endpoints[ep.IP]
endpointRemoved := !exist
if portRemoved || endpointRemoved {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Remove(endpoint)
delete(serviceInfo.EndpointToNodes, endpoint)
p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint)
changedNodeNames.Add(ep.NodeName)
}
if portRemoved {
p.removeServicePortFromNode(ep.NodeName, servicePortName)
}
}
if len(endpointSet) == 0 {
delete(serviceInfo.EndpointMap, port)
} else {
serviceInfo.EndpointMap[port] = endpointSet
}
}
p.endpointSliceMap[key] = newES
p.serviceMap[serviceKey] = serviceInfo
for nodeName := range changedNodeNames {
node, ok := p.nodeSet[nodeName]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) {
es, ok := p.getEndpointSliceInfo(key)
if !ok {
return
}
// no matter what caused cleanup, we take current endpointslice which
// has empty ports and endpoints as deleted,
es.Ports = make(map[Port]Empty)
es.Endpoints = make(map[string]EndpointInfo)
p.syncServiceEndpointsFromEndpointSlice(es, false)
p.mu.Lock()
delete(p.endpointSliceMap, key)
p.mu.Unlock()
}
func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error {
tick := time.NewTicker(p.checkInterval)
for {
select {
case <-tick.C:
for _, node := range p.nodeSet {
p.keeper.AddNodeIfNotPresent(node)
}
case <-ctx.Done():
return nil
}
}
}
func (p *proxy) addEndpointToNode(nodeName string, spn ServicePortName, endpoint Endpoint) bool {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
endpointSet := node.EndpointMap[spn]
if endpointSet.Contains(endpoint) {
return false
}
endpointSet.Add(endpoint)
node.EndpointMap[spn] = endpointSet
p.nodeSet[nodeName] = node
return true
}
func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) {
node, ok := p.nodeSet[nodeName]
if !ok {
return
}
eps := node.EndpointMap[spn]
eps.Remove(endpoint)
if len(eps) == 0 {
delete(node.EndpointMap, spn)
}
p.nodeSet[nodeName] = node
}
func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
node.ServicePortMap[spn] = servicePort
p.nodeSet[nodeName] = node
}
func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) {
node, ok := p.nodeSet[nodeName]
if !ok {
return
}
delete(node.ServicePortMap, spn)
p.nodeSet[nodeName] = node
}
func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo {
info := EndpointSliceInfo{
ObjectKey: ObjectKey{
Name: es.Name,
Namespace: es.Namespace,
},
ServiceKey: ObjectKey{
Name: getServiceName(es.Labels),
Namespace: es.Namespace,
},
Ports: make(map[Port]Empty),
Endpoints: make(map[string]EndpointInfo),
}
for _, port := range es.Ports {
p := Port{
Port: *port.Port,
Protocol: *port.Protocol,
}
info.Ports[p] = Empty{}
}
for _, ep := range es.Endpoints {
nodeName := getHostname(&ep)
if _, exists := p.nodeSet[nodeName]; !exists {
continue
}
// In edge scenarios endpoint stability is shaky, which makes conditions.Ready flap repeatedly.
// The root cause is unclear for now, so we do not handle it here.
// TODO: handle endpoint instability caused by network jitter
info.Endpoints[ep.Addresses[0]] = EndpointInfo{
IP: ep.Addresses[0],
NodeName: nodeName,
}
}
return info
}
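// getServiceName and getHostname are helpers defined elsewhere in this package. Judging by
// how they are used above, getServiceName most likely reads the standard
// "kubernetes.io/service-name" label carried by EndpointSlices, roughly as sketched below;
// the exact key and signature are assumptions, not the project's actual code.
//
//	func getServiceName(labels map[string]string) string {
//		return labels["kubernetes.io/service-name"]
//	}
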
func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) {
p.mu.Lock()
defer p.mu.Unlock()
es, ok := p.endpointSliceMap[key]
return es, ok
} |
func (p *proxy) shouldSkipService(svc *corev1.Service) bool {
if svc.Spec.Type != corev1.ServiceTypeClusterIP {
return true
|
proxy.go
func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log := p.log.WithValues("request", request)
var es EndpointSlice
err := p.client.Get(ctx, request.NamespacedName, &es)
if err != nil {
if errors.IsNotFound(err) {
log.Info("endpointslice is deleted, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
log.Error(err, "failed to get endpointslice")
return reconcile.Result{}, err
}
if es.DeletionTimestamp != nil {
log.Info("endpointslice is terminating, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
serviceName := getServiceName(es.Labels)
if serviceName == "" {
log.Info("no service name found in endpointslice", "endpointslice", es)
return Result{}, nil
}
var (
service corev1.Service
serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace}
)
if err = p.client.Get(ctx, serviceKey, &service); err != nil {
log.Error(err, "failed to get service")
// if service is not found, we don't handle this endpointslice
if errors.IsNotFound(err) {
log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey)
p.cleanupService(serviceKey)
return Result{}, nil
}
return Result{}, err
}
if p.shouldSkipService(&service) {
log.Info("service has no ClusterIP, skip it", "service", service)
p.cleanupService(serviceKey)
return Result{}, nil
}
serviceChanged := p.syncServiceInfoFromService(serviceKey, &service)
p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged)
return Result{}, nil
}
func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) {
p.mu.Lock()
defer p.mu.Unlock()
key, serviceKey := newES.ObjectKey, newES.ServiceKey
oldES := p.endpointSliceMap[key]
isSame := reflect.DeepEqual(oldES, newES)
if isSame {
return
}
serviceInfo := p.serviceMap[serviceKey]
// collect node which has endpoints changes
changedNodeNames := stringset.New()
// add new endpoints
for port := range newES.Ports {
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range newES.Endpoints {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Add(endpoint)
serviceInfo.EndpointToNodes[endpoint] = ep.NodeName
p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{
ClusterIP: serviceInfo.ClusterIP,
Port: port.Port,
Protocol: port.Protocol,
SessionAffinity: serviceInfo.SessionAffinity,
StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds,
})
added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint)
if serviceChanged || added {
changedNodeNames.Add(ep.NodeName)
}
}
serviceInfo.EndpointMap[port] = endpointSet
}
// remove old endpoints
for port := range oldES.Ports {
_, exists := newES.Ports[port]
portRemoved := !exists
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range oldES.Endpoints {
_, exist := newES.Endpoints[ep.IP]
endpointRemoved := !exist
if portRemoved || endpointRemoved {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Remove(endpoint)
delete(serviceInfo.EndpointToNodes, endpoint)
p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint)
changedNodeNames.Add(ep.NodeName)
}
if portRemoved {
p.removeServicePortFromNode(ep.NodeName, servicePortName)
}
}
if len(endpointSet) == 0 {
delete(serviceInfo.EndpointMap, port)
} else {
serviceInfo.EndpointMap[port] = endpointSet
}
}
p.endpointSliceMap[key] = newES
p.serviceMap[serviceKey] = serviceInfo
for nodeName := range changedNodeNames {
node, ok := p.nodeSet[nodeName]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) {
es, ok := p.getEndpointSliceInfo(key)
if !ok {
return
}
// no matter what caused cleanup, we take current endpointslice which
// has empty ports and endpoints as deleted,
es.Ports = make(map[Port]Empty)
es.Endpoints = make(map[string]EndpointInfo)
p.syncServiceEndpointsFromEndpointSlice(es, false)
p.mu.Lock()
delete(p.endpointSliceMap, key)
p.mu.Unlock()
}
func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error {
tick := time.NewTicker(p.checkInterval)
for {
select {
case <-tick.C:
for _, node := range p.nodeSet {
p.keeper.AddNodeIfNotPresent(node)
}
case <-ctx.Done():
return nil
}
}
}
func (p *proxy) addEndpointToNode(nodeName string, spn ServicePortName, endpoint Endpoint) bool {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
endpointSet := node.EndpointMap[spn]
if endpointSet.Contains(endpoint) {
return false
}
endpointSet.Add(endpoint)
node.EndpointMap[spn] = endpointSet
p.nodeSet[nodeName] = node
return true
}
func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) {
node, ok := p.nodeSet[nodeName]
if !ok {
return
}
eps := node.EndpointMap[spn]
eps.Remove(endpoint)
if len(eps) == 0 {
delete(node.EndpointMap, spn)
}
p.nodeSet[nodeName] = node
}
func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
node.ServicePortMap[spn] = servicePort
p.nodeSet[nodeName] = node
}
func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) {
node, ok := p.nodeSet[nodeName]
if !ok {
return
}
delete(node.ServicePortMap, spn)
p.nodeSet[nodeName] = node
}
func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo {
info := EndpointSliceInfo{
ObjectKey: ObjectKey{
Name: es.Name,
Namespace: es.Namespace,
},
ServiceKey: ObjectKey{
Name: getServiceName(es.Labels),
Namespace: es.Namespace,
},
Ports: make(map[Port]Empty),
Endpoints: make(map[string]EndpointInfo),
}
for _, port := range es.Ports {
p := Port{
Port: *port.Port,
Protocol: *port.Protocol,
}
info.Ports[p] = Empty{}
}
for _, ep := range es.Endpoints {
nodeName := getHostname(&ep)
if _, exists := p.nodeSet[nodeName]; !exists {
continue
}
// In edge scenarios endpoint stability is shaky, which makes conditions.Ready flap repeatedly.
// The root cause is unclear for now, so we do not handle it here.
// TODO: handle endpoint instability caused by network jitter
info.Endpoints[ep.Addresses[0]] = EndpointInfo{
IP: ep.Addresses[0],
NodeName: nodeName,
}
}
return info
}
func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) {
p.mu.Lock()
defer p.mu.Unlock()
es, ok := p.endpointSliceMap[key]
return es, ok
}
func (p *proxy) shouldSkipService(svc *corev1.Service) bool {
if svc.Spec.Type != corev1.ServiceTypeClusterIP {
return true
}
if svc.Spec.ClusterIP == corev1.ClusterIPNone || svc.Spec.ClusterIP == "" {
return true
}
if svc.Spec.Selector == nil || len(svc.Spec.Selector) == 0 {
return true
}
return false
}
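// For example, a headless service (ClusterIP "None"), a service without an assigned
// ClusterIP, or a selector-less service backed by manually managed endpoints is skipped by
// the checks above, while an ordinary ClusterIP service with a selector is processed.
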
func makeServiceInfo(svc *corev1.Service) ServiceInfo {
var stickyMaxAgeSeconds int32
if svc.Spec.SessionAffinity | == corev1.Serv | identifier_name |
|
proxy.go
node, ok := p.nodeSet[name]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
func (p *proxy) OnEndpointSliceUpdate(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log := p.log.WithValues("request", request)
var es EndpointSlice
err := p.client.Get(ctx, request.NamespacedName, &es)
if err != nil {
if errors.IsNotFound(err) {
log.Info("endpointslice is deleted, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
log.Error(err, "failed to get endpointslice")
return reconcile.Result{}, err
}
if es.DeletionTimestamp != nil {
log.Info("endpointslice is terminating, cleanup related endpoints")
p.cleanupEndpointsOfEndpointSlice(request.NamespacedName)
return reconcile.Result{}, nil
}
serviceName := getServiceName(es.Labels)
if serviceName == "" {
log.Info("no service name found in endpointslice", "endpointslice", es)
return Result{}, nil
}
var (
service corev1.Service
serviceKey = ObjectKey{Name: serviceName, Namespace: request.Namespace}
)
if err = p.client.Get(ctx, serviceKey, &service); err != nil {
log.Error(err, "failed to get service")
// if service is not found, we don't handle this endpointslice
if errors.IsNotFound(err) {
log.Info("Corresponding service is not found, cleanup service and endpoints", "serviceKey", serviceKey)
p.cleanupService(serviceKey)
return Result{}, nil
}
return Result{}, err
}
if p.shouldSkipService(&service) {
log.Info("service has no ClusterIP, skip it", "service", service)
p.cleanupService(serviceKey)
return Result{}, nil
}
serviceChanged := p.syncServiceInfoFromService(serviceKey, &service)
p.syncServiceEndpointsFromEndpointSlice(p.makeEndpointSliceInfo(&es), serviceChanged)
return Result{}, nil
}
func (p *proxy) syncServiceEndpointsFromEndpointSlice(newES EndpointSliceInfo, serviceChanged bool) {
p.mu.Lock()
defer p.mu.Unlock()
key, serviceKey := newES.ObjectKey, newES.ServiceKey
oldES := p.endpointSliceMap[key]
isSame := reflect.DeepEqual(oldES, newES)
if isSame {
return
}
serviceInfo := p.serviceMap[serviceKey]
// collect node which has endpoints changes
changedNodeNames := stringset.New()
// add new endpoints
for port := range newES.Ports {
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range newES.Endpoints {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Add(endpoint)
serviceInfo.EndpointToNodes[endpoint] = ep.NodeName
p.addServicePortToNode(ep.NodeName, servicePortName, ServicePort{
ClusterIP: serviceInfo.ClusterIP,
Port: port.Port,
Protocol: port.Protocol,
SessionAffinity: serviceInfo.SessionAffinity,
StickyMaxAgeSeconds: serviceInfo.StickyMaxAgeSeconds,
})
added := p.addEndpointToNode(ep.NodeName, servicePortName, endpoint)
if serviceChanged || added {
changedNodeNames.Add(ep.NodeName)
}
}
serviceInfo.EndpointMap[port] = endpointSet
}
// remove old endpoints
for port := range oldES.Ports {
_, exists := newES.Ports[port]
portRemoved := !exists
servicePortName := ServicePortName{
NamespacedName: serviceKey,
Port: port.Port,
Protocol: port.Protocol,
}
endpointSet := serviceInfo.EndpointMap[port]
for _, ep := range oldES.Endpoints {
_, exist := newES.Endpoints[ep.IP]
endpointRemoved := !exist
if portRemoved || endpointRemoved {
endpoint := Endpoint{
IP: ep.IP,
Port: port.Port,
}
endpointSet.Remove(endpoint)
delete(serviceInfo.EndpointToNodes, endpoint)
p.removeEndpointFromNode(ep.NodeName, servicePortName, endpoint)
changedNodeNames.Add(ep.NodeName)
}
if portRemoved {
p.removeServicePortFromNode(ep.NodeName, servicePortName)
}
}
if len(endpointSet) == 0 {
delete(serviceInfo.EndpointMap, port)
} else {
serviceInfo.EndpointMap[port] = endpointSet
}
}
p.endpointSliceMap[key] = newES
p.serviceMap[serviceKey] = serviceInfo
for nodeName := range changedNodeNames {
node, ok := p.nodeSet[nodeName]
if !ok {
continue
}
p.keeper.AddNode(node)
}
}
func (p *proxy) cleanupEndpointsOfEndpointSlice(key ObjectKey) {
es, ok := p.getEndpointSliceInfo(key)
if !ok {
return
}
// no matter what caused cleanup, we take current endpointslice which
// has empty ports and endpoints as deleted,
es.Ports = make(map[Port]Empty)
es.Endpoints = make(map[string]EndpointInfo)
p.syncServiceEndpointsFromEndpointSlice(es, false)
p.mu.Lock()
delete(p.endpointSliceMap, key)
p.mu.Unlock()
}
func (p *proxy) startCheckLoadBalanceRules(ctx context.Context) error {
tick := time.NewTicker(p.checkInterval)
for {
select {
case <-tick.C:
for _, node := range p.nodeSet {
p.keeper.AddNodeIfNotPresent(node)
}
case <-ctx.Done():
return nil
}
}
}
func (p *proxy) addEndpointToNode(nodeName string, spn ServicePortName, endpoint Endpoint) bool {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
endpointSet := node.EndpointMap[spn]
if endpointSet.Contains(endpoint) {
return false
}
endpointSet.Add(endpoint)
node.EndpointMap[spn] = endpointSet
p.nodeSet[nodeName] = node
return true
}
func (p *proxy) removeEndpointFromNode(nodeName string, spn ServicePortName, endpoint Endpoint) {
node, ok := p.nodeSet[nodeName]
if !ok {
return
}
eps := node.EndpointMap[spn]
eps.Remove(endpoint)
if len(eps) == 0 {
delete(node.EndpointMap, spn)
}
p.nodeSet[nodeName] = node
}
func (p *proxy) addServicePortToNode(nodeName string, spn ServicePortName, servicePort ServicePort) {
node, ok := p.nodeSet[nodeName]
if !ok {
node = newEdgeNode(nodeName)
}
node.ServicePortMap[spn] = servicePort
p.nodeSet[nodeName] = node
}
func (p *proxy) removeServicePortFromNode(nodeName string, spn ServicePortName) {
node, ok := p.nodeSet[nodeName]
if !ok {
return
}
delete(node.ServicePortMap, spn)
p.nodeSet[nodeName] = node
}
func (p *proxy) makeEndpointSliceInfo(es *EndpointSlice) EndpointSliceInfo {
info := EndpointSliceInfo{
ObjectKey: ObjectKey{
Name: es.Name,
Namespace: es.Namespace,
},
ServiceKey: ObjectKey{
Name: getServiceName(es.Labels),
Namespace: es.Namespace,
},
Ports: make(map[Port]Empty),
Endpoints: make(map[string]EndpointInfo),
}
for _, port := range es.Ports {
p := Port{
Port: *port.Port,
Protocol: *port.Protocol,
}
info.Ports[p] = Empty{}
}
for _, ep := range es.Endpoints {
nodeName := getHostname(&ep)
if _, exists := p.nodeSet[nodeName]; !exists {
continue
}
// In edge scenarios endpoint stability is shaky, which makes conditions.Ready flap repeatedly.
// The root cause is unclear for now, so we do not handle it here.
// TODO: handle endpoint instability caused by network jitter
info.Endpoints[ep.Addresses[0]] = EndpointInfo{
IP: ep.Addresses[0],
NodeName: nodeName,
}
}
return info
}
func (p *proxy) getEndpointSliceInfo(key ObjectKey) (EndpointSliceInfo, bool) {
p.mu.Lock()
defer p.mu.Unlock()
es, ok := p.endpointSliceMap[key]
return es, ok
}
func (p *proxy) shouldSkipService(svc *corev1.Service) bool {
if svc.Spec.Type != corev1.ServiceTypeClusterIP {
return true
}
if svc.Spec.ClusterIP == corev1.ClusterIPNone || svc.Spec.ClusterIP == "" {
return true
}
if svc.Spec.Selector == nil || len(svc.Spec.Selector) == 0 {
return true
}
return false
}
|
maimemo_client.rs
brief: String,
created_time: Option<String>,
updated_time: Option<String>,
contents: Option<String>,
}
impl Notepad {
pub fn get_notepad_id(&self) -> &str {
&self.notepad_id
}
pub fn set_contents(&mut self, contents: Option<String>) {
self.contents = contents;
}
pub fn get_contents(&self) -> Option<&str> {
self.contents.as_deref()
}
pub fn get_contents_mut(&mut self) -> Option<&mut String> {
self.contents.as_mut()
}
}
impl fmt::Display for Notepad {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// Only print the first line, plus the total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// Provides a handful of operations for accessing maimemo.
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// Save the cookie store when the client is dropped
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// Build a client from config. If config.cookie_path exists it is loaded; otherwise an in-memory cookie store is used.
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// Log in and update config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if !self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// Return the complete notepad list by calling get_notepad_list and get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// Fetch the notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
// ?token={user_token}
let url_handler = |url: &str| {
let user_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// Fetch the word text contained in a notepad
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// Refresh and download the captcha for the notepad, returning the raw image bytes.
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// Save a notepad
///
/// Note: maimemo requires fetching a captcha before saving, and both requests must be sent
/// from the same machine. Refreshing the captcha in a browser on the Windows host and then
/// saving from WSL2 does not take effect, most likely because the server checks whether the
/// two packets come from the same machine.
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
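/// Illustrative only, not part of the original client: one possible call sequence for
/// updating a notepad, following the note above: fetch a captcha first, then save from the
/// same machine. How the captcha image is turned into text (manual input, OCR, ...) is left
/// to the caller and is an assumption here.
pub async fn update_notepad_example(
    &self,
    mut notepad: Notepad,
    captcha: String,
    new_contents: String,
) -> Result<(), String> {
    // Request a fresh captcha image; the caller is expected to turn it into `captcha`.
    let _captcha_image: Vec<u8> = self.refresh_captcha().await?;
    notepad.set_contents(Some(new_contents));
    self.save_notepad(notepad, captcha).await
}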
/// Extract the word text from the response HTML body
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(| | is_private: u8,
notepad_id: String,
title: String, | random_line_split |
|
maimemo_client.rs
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// 仅输出第一行 与 total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if !self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
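/// Illustrative only: a typical read path that logs in when needed and then pulls every
/// notepad together with its contents. Configuration loading and error handling are
/// simplified; this helper is not part of the original client.
pub async fn dump_notepads_example(&mut self) -> Result<(), String> {
    if !self.has_logged() {
        self.login().await?;
    }
    for notepad in self.get_notepads().await? {
        // Display prints the first line of the contents plus the total length.
        println!("{}", notepad);
    }
    Ok(())
}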
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
// ?token={user_token}
let url_handler = |url: &str| {
let user_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// Save a notepad
///
/// Note: maimemo requires fetching a captcha before saving, and both requests must be sent
/// from the same machine. Refreshing the captcha in a browser on the Windows host and then
/// saving from WSL2 does not take effect, most likely because the server checks whether the
/// two packets come from the same machine.
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id | _selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
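// For reference, parse_notepad_text above expects markup along the lines of
// `<div id="content">word1\nword2\n...</div>`; that shape is inferred from the "#content"
// selector used in the function, not from maimemo documentation, and the function returns
// the element's inner HTML.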
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
client.lo | identifier_body |
|
maimemo_client.rs
impl fmt::Display for Notepad {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// 仅输出第一行 与 total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if !self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
// ?token={user_ | ser_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// Save a notepad
///
/// Note: maimemo requires fetching a captcha before saving, and both requests must be sent
/// from the same machine. Refreshing the captcha in a browser on the Windows host and then
/// saving from WSL2 does not take effect, most likely because the server checks whether the
/// two packets come from the same machine.
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id_selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH). | token}
let url_handler = |url: &str| {
let u | conditional_block |
maimemo_client.rs | )]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if !self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
// ?token={user_token}
let url_handler = |url: &str| {
let user_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// Save a notepad
///
/// Note: maimemo requires fetching a captcha before saving, and both requests must be sent
/// from the same machine. Refreshing the captcha in a browser on the Windows host and then
/// saving from WSL2 does not take effect, most likely because the server checks whether the
/// two packets come from the same machine.
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if !self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id_selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
client.login().await.map_err(|e| format!("{:?}", e))?;
Ok(())
}
#[tokio::test]
async fn get_notepad_list() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if !client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await | ?;
asser | identifier_name |
|
note.py | 0])
#sl=cl.getstrlist()
# self.strl=self.strl+sl
def getnexturl(self, nexturllist, cl):
for i in range(len(nexturllist)):
nul=nexturllist[i]
self.nustr=self.nustr+nul[1]+nul[0]+"\n"
cl.gethtml("http://gz.58.com"+nul[0])
uls=cl.geturllist()
if cl.isend():
if i==(len(nexturllist)-1):
cl.gethtml("http://gz.58.com"+nul[0])
nus=cl.getnexturl()
del nus[0:(i+1)]
self.getnexturl(nus, cl)
self.urllist=self.urllist+uls
def threadcl(self):
url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4"
self.crawling(url)
'''#hour hand
transform.translate(50,50)
transform.rotate(hour_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(Qt.darkRed))
painter.drawPolygon(QPolygonF(hourPoints))
transform.reset()
#minute hand
transform.translate(50,50)
transform.rotate(minite_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setBrush(QBrush(Qt.darkGreen))
painter.drawPolygon(QPolygonF(minPoints))
transform.reset()
#second hand
transform.translate(50,50)
transform.rotate(-53)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
'''
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.initUI()
def initUI(self):
self.setGeometry(100, 35, 1200, 670)
def paintEvent(self,event):
source.ui.groupBox_show.close()
pt=QPainter(self)
pt.begin(self)
#self.drawRect(pt)
self.drawclock(pt)
pt.end()
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
def drawclock(self, painter):
painter.setRenderHint(QPainter.Antialiasing)
#set the font used for the dial numerals
font=QFont("Times",6)
fm=QFontMetrics(font)
fontRect=fm.boundingRect("99")#bounding rectangle of the text to be drawn
#minute-hand polygon points
minPoints=[QPointF(50,25),
QPointF(48,50),
QPointF(52,50)]
#hour-hand polygon points
hourPoints=[QPointF(50,35),
QPointF(48,50),
QPointF(52,50)]
side=min(self.width(),self.height())
painter.setViewport((2*self.width())/5,self.height()/16,
(4*side)/7, (4*side)/7)#keep the viewport centered in the window
#set QPainter's logical coordinate system: whatever the window size,
#the top-left corner stays (0,0) and the bottom-right (100,100),
#so the window centre is (50,50)
painter.setWindow(0,0,100,100)
#draw the dial face with a radial gradient
niceBlue=QColor(150,150,200)
haloGrident=QRadialGradient(50,50,50,50,50)
haloGrident.setColorAt(0.0,Qt.lightGray)
haloGrident.setColorAt(0.5,Qt.darkGray)
haloGrident.setColorAt(0.9,Qt.white)
haloGrident.setColorAt(1.0,niceBlue)
painter.setBrush(haloGrident)
painter.setPen(QPen(Qt.darkGray,1))
painter.drawEllipse(0,0,100,100)
transform=QTransform()
#draw the "0" numeral and its tick mark
painter.setPen(QPen(Qt.black,1.5))
fontRect.moveCenter(QPoint(50,10+fontRect.height()/2))
painter.setFont(font)
painter.drawLine(50,2,50,8)#
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0")
for i in range(1,12):
transform.translate(50, 50)
transform.rotate(30)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,8)
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i)
transform.reset()
#draw the minute tick marks
painter.setPen(QPen(Qt.blue,1))
for i in range(1,60):
transform.translate(50,50)
transform.rotate(6)
transform.translate(-50,-50)
if i%5!=0:
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,5)
transform.reset()
#get the current time
currentTime=QTime().currentTime()
#hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12
#minite=currentTime.minute()
second=currentTime.second()
#compute the required rotation angles
#hour_angle=hour*30.0+(minite/60.0)*30.0
#minite_angle=(minite/60.0)*360.0
second_angle=second*6.0-53
source.ui.textEdit_get.setText(str(second))
self.draw_line(painter, transform, second_angle)
self.draw_line(painter, transform)
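# Worked example for second_angle above: each second corresponds to 6 degrees (360/60), so
# at second == 30 the value is 30*6.0 - 53 = 127; the -53 offset compensates for the
# hard-coded start direction of drawLine(50,50,90,20) in draw_line below.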
def draw_line(self, painter, transform, angle=-53):
#second hand
transform.reset()
transform.translate(50,50)
transform.rotate(angle)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
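# Illustrative sketch, not in the original file: paintEvent reads the current time, but
# nothing schedules repaints, so the second hand only advances when the widget repaints for
# some other reason; note also that drawRect is defined twice in Window, so the second
# definition silently replaces the first. A QTimer that calls update() once per second would
# drive the clock. The launcher below assumes source.ui has been initialised by the
# application's normal startup; the other names are standard PyQt5.
def run_clock_demo():
    import sys
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtCore import QTimer
    app = QApplication(sys.argv)
    win = Window()
    timer = QTimer(win)
    timer.timeout.connect(win.update)  # repaint roughly once per second
    timer.start(1000)
    win.show()
    sys.exit(app.exec_())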
#**********************************************************************8
from PyQt5 import QtCore, QtGui, QtWidgets
from source import source
from threadings import Tnuls
import threading
import time
from ui_paint import Window
def bigin_click(self):
if not source.isbigan:
if self.comboBox_2.currentText ()=="58同城":
t=Tnuls(0)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="中华英才网":
t=Tnuls(1)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="智联招聘":
t=Tnuls(2)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="猎聘猎头网":
t=Tnuls(3)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="卓博人才网":
t=Tnuls(4)
t.start()
source.isbigan=True
if self.c | omboBox_2.currentText ()=="前程无忧":
t=Tnuls(5)
| conditional_block |
|
note.py
self.textEdit_get.setText(self.strl)
self.textEdit_getstr.setText(self.ustr)
def crawling(self, url):
cl=crawler.crawler()
cl.gethtml(url)
self.urllist=cl.geturllist()
nexturllist=cl.getnexturl()
self.getnexturl(nexturllist, cl)
for i in range(len(self.urllist)):
ul=self.urllist[i]
self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n"
#for ur in self.urllist:
# cl.gethtml(ur[0])
#sl=cl.getstrlist()
# self.strl=self.strl+sl
def getnexturl(self, nexturllist, cl):
for i in range(len(nexturllist)):
nul=nexturllist[i]
self.nustr=self.nustr+nul[1]+nul[0]+"\n"
cl.gethtml("http://gz.58.com"+nul[0])
uls=cl.geturllist()
if cl.isend():
if i==(len(nexturllist)-1):
cl.gethtml("http://gz.58.com"+nul[0])
nus=cl.getnexturl()
del nus[0:(i+1)]
self.getnexturl(nus, cl)
self.urllist=self.urllist+uls
def threadcl(self):
url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4"
self.crawling(url)
'''#时针
transform.translate(50,50)
transform.rotate(hour_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(Qt.darkRed))
painter.drawPolygon(QPolygonF(hourPoints))
transform.reset()
#分针
transform.translate(50,50)
transform.rotate(minite_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setBrush(QBrush(Qt.darkGreen))
painter.drawPolygon(QPolygonF(minPoints))
transform.reset()
#秒针
transform.translate(50,50)
transform.rotate(-53)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
'''
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.initUI()
def initUI(self):
self.setGeometry(100, 35, 1200, 670)
def paintEvent(self,event):
source.ui.groupBox_show.close()
pt=QPainter(self)
pt.begi | #self.drawRect(pt)
self.drawclock(pt)
pt.end()
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
def drawclock(self, painter):
painter.setRenderHint(QPainter.Antialiasing)
#设置表盘中的文字字体
font=QFont("Times",6)
fm=QFontMetrics(font)
fontRect=fm.boundingRect("99")#获取绘制字体的矩形范围
#分针坐标点
minPoints=[QPointF(50,25),
QPointF(48,50),
QPointF(52,50)]
#时钟坐标点
hourPoints=[QPointF(50,35),
QPointF(48,50),
QPointF(52,50)]
side=min(self.width(),self.height())
painter.setViewport((2*self.width())/5,self.height()/16,
(4*side)/7, (4*side)/7)#始终处于窗口中心位置显示
#设置QPainter的坐标系统,无论窗体大小如何变化,
#窗体左上坐标为(0,0),右下坐标为(100,100),
#因此窗体中心坐标为(50,50)
painter.setWindow(0,0,100,100)
#绘制表盘,使用环形渐变色
niceBlue=QColor(150,150,200)
haloGrident=QRadialGradient(50,50,50,50,50)
haloGrident.setColorAt(0.0,Qt.lightGray)
haloGrident.setColorAt(0.5,Qt.darkGray)
haloGrident.setColorAt(0.9,Qt.white)
haloGrident.setColorAt(1.0,niceBlue)
painter.setBrush(haloGrident)
painter.setPen(QPen(Qt.darkGray,1))
painter.drawEllipse(0,0,100,100)
transform=QTransform()
#绘制时钟为0的字,以及刻度
painter.setPen(QPen(Qt.black,1.5))
fontRect.moveCenter(QPoint(50,10+fontRect.height()/2))
painter.setFont(font)
painter.drawLine(50,2,50,8)#
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0")
for i in range(1,12):
transform.translate(50, 50)
transform.rotate(30)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,8)
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i)
transform.reset()
#绘制分钟刻度线
painter.setPen(QPen(Qt.blue,1))
for i in range(1,60):
transform.translate(50,50)
transform.rotate(6)
transform.translate(-50,-50)
if i%5!=0:
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,5)
transform.reset()
#获取当前时间
currentTime=QTime().currentTime()
#hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12
#minite=currentTime.minute()
second=currentTime.second()
#获取所需旋转角度
#hour_angle=hour*30.0+(minite/60.0)*30.0
#minite_angle=(minite/60.0)*360.0
second_angle=second*6.0-53
source.ui.textEdit_get.setText(str(second))
self.draw_line(painter, transform, second_angle)
self.draw_line(painter, transform)
def draw_line(self, painter, transform, angle=-53):
#秒针
transform.reset()
transform.translate(50,50)
transform.rotate(angle)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
#**********************************************************************8
from PyQt5 import QtCore, QtGui, QtWidgets
from source import source
from threadings import Tnuls
import threading
import time
from ui_paint import Window
def bigin_click(self):
if not source.isbigan:
if self.comboBox_2.currentText ()=="58同城":
t=Tnuls(0)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="中华英才网":
t=Tnuls(1 | n(self)
| identifier_name |
note.py | self.textEdit_get.setText(self.strl)
self.textEdit_getstr.setText(self.ustr)
def crawling(self, url):
cl=crawler.crawler()
cl.gethtml(url)
self.urllist=cl.geturllist()
nexturllist=cl.getnexturl()
self.getnexturl(nexturllist, cl)
for i in range(len(self.urllist)):
ul=self.urllist[i]
self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n"
#for ur in self.urllist:
# cl.gethtml(ur[0])
#sl=cl.getstrlist()
# self.strl=self.strl+sl
def getnexturl(self, nexturllist, cl):
for i in range(len(nexturllist)):
nul=nexturllist[i]
self.nustr=self.nustr+nul[1]+nul[0]+"\n"
cl.gethtml("http://gz.58.com"+nul[0])
uls=cl.geturllist()
if cl.isend():
if i==(len(nexturllist)-1):
cl.gethtml("http://gz.58.com"+nul[0])
nus=cl.getnexturl()
del nus[0:(i+1)]
self.getnexturl(nus, cl)
self.urllist=self.urllist+uls
def threadcl(self):
url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4"
self.crawling(url)
'''# hour hand
transform.translate(50,50)
transform.rotate(hour_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(Qt.darkRed))
painter.drawPolygon(QPolygonF(hourPoints))
transform.reset()
# minute hand
transform.translate(50,50)
transform.rotate(minite_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setBrush(QBrush(Qt.darkGreen))
painter.drawPolygon(QPolygonF(minPoints))
transform.reset()
# second hand
transform.translate(50,50)
transform.rotate(-53)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
'''
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.initUI()
def initUI(self):
self.setGeometry(100, 35, 1200, 670)
def paintEvent(self,event):
source.ui.groupBox_show.close()
pt=QPainter(self)
pt.begin(self)
#self.drawRect(pt)
self.drawclock(pt)
pt.end()
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
def drawclock(self, painter):
painter.setRenderHint(QPainter.Antialiasing)
# set the font for the text on the dial
font=QFont("Times",6)
fm=QFontMetrics(font)
fontRect=fm.boundingRect("99")# get the bounding rect of the text to be drawn
# minute-hand polygon points
minPoints=[QPointF(50,25),
QPointF(48,50),
QPointF(52,50)]
# hour-hand polygon points
hourPoints=[QPointF(50,35),
QPointF(48,50),
QPointF(52,50)]
side=min(self.width(),self.height())
painter.setViewport((2*self.width())/5,self.height()/16,
(4*side)/7, (4*side)/7)# keep the clock centered in the window
# set QPainter's logical coordinates: however the window is resized,
# the top-left corner is (0,0) and the bottom-right is (100,100),
# so the window center is (50,50)
painter.setWindow(0,0,100,100)
# draw the dial face with a radial gradient
niceBlue=QColor(150,150,200)
haloGrident=QRadialGradient(50,50,50,50,50)
haloGrident.setColorAt(0.0,Qt.lightGray)
haloGrident.setColorAt(0.5,Qt.darkGray)
haloGrident.setColorAt(0.9,Qt.white)
haloGrident.setColorAt(1.0,niceBlue)
painter.setBrush(haloGrident)
painter.setPen(QPen(Qt.darkGray,1))
painter.drawEllipse(0,0,100,100)
transform=QTransform()
# draw the '0' hour numeral and its tick mark
painter.setPen(QPen(Qt.black,1.5))
fontRect.moveCenter(QPoint(50,10+fontRect.height()/2))
painter.setFont(font)
painter.drawLine(50,2,50,8)#
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0")
for i in range(1,12):
transform.translate(50, 50)
transform.rotate(30)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,8)
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i)
transform.reset()
# draw the minute tick marks
painter.setPen(QPen(Qt.blue,1))
for i in range(1,60):
transform.translate(50,50)
transform.rotate(6)
transform.translate(-50,-50)
if i%5!=0:
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,5)
transform.reset()
# get the current time
currentTime=QTime().currentTime()
#hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12
#minite=currentTime.minute()
second=currentTime.second()
# compute the rotation angles
#hour_angle=hour*30.0+(minite/60.0)*30.0
#minite_angle=(minite/60.0)*360.0
second_angle=second*6.0-53
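# 6 degrees per second; the -53 offset appears to compensate for the hand being drawn roughly 53 degrees off vertical (from (50,50) to (90,20))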
source.ui.textEdit_get.setText(str(second))
self.draw_line(painter, transform, second_angle)
self.draw_line(painter, transform)
def draw_line(self, painter, transform, angle=-53):
# second hand
transform.reset()
transform.translate(50,50)
transform.rotate(angle)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600) | import threading
import time
from ui_paint import Window
def bigin_click(self):
if not source.isbigan:
if self.comboBox_2.currentText ()=="58同城":
t=Tnuls(0)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="中华英才网":
t=Tnuls(1)
t |
#**********************************************************************8
from PyQt5 import QtCore, QtGui, QtWidgets
from source import source
from threadings import Tnuls | random_line_split |
note.py | self.textEdit_get.setText(self.strl)
self.textEdit_getstr.setText(self.ustr)
def crawling(self, url):
cl=crawler.crawler()
cl.gethtml(url)
self.urllist=cl.geturllist()
nexturllist=cl.getnexturl()
self.getnexturl(nexturllist, cl)
for i in range(len(self.urllist)):
ul=self.urllist[i]
self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n"
#for ur in self.urllist:
# cl.gethtml(ur[0])
#sl=cl.getstrlist()
# self.strl=self.strl+sl
def getnexturl(self, nexturllist, cl):
for i in range(len(nexturllist)):
nul=nexturllist[i]
self.nustr=self.nustr+nul[1]+nul[0]+"\n"
cl.gethtml("http://gz.58.com"+nul[0])
uls=cl.geturllist()
if cl.isend():
if i==(len(nexturllist)-1):
cl.gethtml("http://gz.58.com"+nul[0])
nus=cl.getnexturl()
del nus[0:(i+1)]
self.getnexturl(nus, cl)
self.urllist=self.urllist+uls
def threadcl(self):
url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4"
self.crawling(url)
'''# hour hand
transform.translate(50,50)
transform.rotate(hour_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(Qt.darkRed))
painter.drawPolygon(QPolygonF(hourPoints))
transform.reset()
# minute hand
transform.translate(50,50)
transform.rotate(minite_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setBrush(QBrush(Qt.darkGreen))
painter.drawPolygon(QPolygonF(minPoints))
transform.reset()
# second hand
transform.translate(50,50)
transform.rotate(-53)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
'''
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.initUI()
def initUI(self):
self.setGeometry(100, 35, 1200, 670)
def paintEvent(self,event):
source.ui.groupBox_show.close()
pt=QPainter(self)
pt.begin(self)
#self.drawRect(pt)
self.drawclock(pt)
pt.end()
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
def drawclock(self, painter):
painter.se | minPoints=[QPointF(50,25),
QPointF(48,50),
QPointF(52,50)]
# hour-hand polygon points
hourPoints=[QPointF(50,35),
QPointF(48,50),
QPointF(52,50)]
side=min(self.width(),self.height())
painter.setViewport((2*self.width())/5,self.height()/16,
(4*side)/7, (4*side)/7)# keep the clock centered in the window
# set QPainter's logical coordinates: however the window is resized,
# the top-left corner is (0,0) and the bottom-right is (100,100),
# so the window center is (50,50)
painter.setWindow(0,0,100,100)
# draw the dial face with a radial gradient
niceBlue=QColor(150,150,200)
haloGrident=QRadialGradient(50,50,50,50,50)
haloGrident.setColorAt(0.0,Qt.lightGray)
haloGrident.setColorAt(0.5,Qt.darkGray)
haloGrident.setColorAt(0.9,Qt.white)
haloGrident.setColorAt(1.0,niceBlue)
painter.setBrush(haloGrident)
painter.setPen(QPen(Qt.darkGray,1))
painter.drawEllipse(0,0,100,100)
transform=QTransform()
# draw the '0' hour numeral and its tick mark
painter.setPen(QPen(Qt.black,1.5))
fontRect.moveCenter(QPoint(50,10+fontRect.height()/2))
painter.setFont(font)
painter.drawLine(50,2,50,8)#
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0")
for i in range(1,12):
transform.translate(50, 50)
transform.rotate(30)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,8)
painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i)
transform.reset()
# draw the minute tick marks
painter.setPen(QPen(Qt.blue,1))
for i in range(1,60):
transform.translate(50,50)
transform.rotate(6)
transform.translate(-50,-50)
if i%5!=0:
painter.setWorldTransform(transform)
painter.drawLine(50,2,50,5)
transform.reset()
# get the current time
currentTime=QTime().currentTime()
#hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12
#minite=currentTime.minute()
second=currentTime.second()
# compute the rotation angles
#hour_angle=hour*30.0+(minite/60.0)*30.0
#minite_angle=(minite/60.0)*360.0
second_angle=second*6.0-53
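# 6 degrees per second; the -53 offset appears to compensate for the hand being drawn roughly 53 degrees off vertical (from (50,50) to (90,20))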
source.ui.textEdit_get.setText(str(second))
self.draw_line(painter, transform, second_angle)
self.draw_line(painter, transform)
def draw_line(self, painter, transform, angle=-53):
# second hand
transform.reset()
transform.translate(50,50)
transform.rotate(angle)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
def drawRect(self, pt):
pen1=QPen(QColor(225, 225, 225, 225))
rec=QRect(500, 500,500, 500)
pt.setPen(pen1)
pt.drawRect(rec)
pt.setBrush(QColor(0, 0, 0, 255))
pt.drawRect(300, 300, 300, 600)
#**********************************************************************8
from PyQt5 import QtCore, QtGui, QtWidgets
from source import source
from threadings import Tnuls
import threading
import time
from ui_paint import Window
def bigin_click(self):
if not source.isbigan:
if self.comboBox_2.currentText ()=="58同城":
t=Tnuls(0)
t.start()
source.isbigan=True
if self.comboBox_2.currentText ()=="中华英才网":
t=Tnuls(1)
| tRenderHint(QPainter.Antialiasing)
# set the font for the text on the dial
font=QFont("Times",6)
fm=QFontMetrics(font)
fontRect=fm.boundingRect("99")# get the bounding rect of the text to be drawn
# minute-hand polygon points
| identifier_body |
app.js | 592000');
}
// Enable CORS preflight across the board.
app.options('*', cors());
app.use(cors());
/////////// Configure Webserver ///////////
app.use(function(req, res, next){
var keys;
console.log('silly', '------------------------------------------ incoming request ------------------------------------------');
console.log('info', 'New ' + req.method + ' request for', req.url);
req.bag = {}; //create my object for my stuff
req.session.count = eval(req.session.count) + 1;
req.bag.session = req.session;
var url_parts = url.parse(req.url, true);
req.parameters = url_parts.query;
keys = Object.keys(req.parameters);
if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters
keys = Object.keys(req.body);
if (req.body && keys.length > 0) console.log({body: req.body}); //print request body
next();
});
//// Router ////
app.use('/', require('./routes/site_router'));
////////////////////////////////////////////
////////////// Error Handling //////////////
////////////////////////////////////////////
app.use(function(req, res, next) {
var err = new Error('Not Found');
err.status = 404;
next(err);
});
app.use(function(err, req, res, next) { // = development error handler, print stack trace
console.log("Error Handeler -", req.url);
var errorCode = err.status || 500;
res.status(errorCode);
req.bag.error = {msg:err.stack, status:errorCode};
if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file";
res.render('template/error', {bag:req.bag});
});
// ============================================================================================================================
// Launch Webserver
// ============================================================================================================================
var server = http.createServer(app).listen(port, function() {});
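// plain HTTP server; cb_deployed() below attaches a WebSocket server to this same server object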
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";
server.timeout = 240000; // Ta-da.
console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------');
if(process.env.PRODUCTION) console.log('Running using Production settings');
else console.log('Running using Developer settings');
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// Warning
// ============================================================================================================================
// ============================================================================================================================
// Entering
// ============================================================================================================================
// ============================================================================================================================
// Test Area
// ============================================================================================================================
var app1 = require('./utils/ws_app1');
var app2 = require('./utils/ws_app2');
var ws = require('ws');
var wss = {};
var Obc1 = require('./utils/obc-js/index');
var obc = new Obc1();
// ==================================
// load peers manually or from VCAP, VCAP will overwrite hardcoded list!
// ==================================
var peers = [
{
"discovery_host": "169.44.38.124",
"discovery_port": "32826",
"api_host": "169.44.38.124",
"api_port": "32827",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1",
"api_url": "http://169.44.38.124:32827"
},
{
"discovery_host": "169.44.38.120",
"discovery_port": "32804",
"api_host": "169.44.38.120",
"api_port": "32805",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3",
"api_url": "http://169.44.38.120:32805"
},
{
"discovery_host": "169.44.38.114",
"discovery_port": "32810",
"api_host": "169.44.38.114",
"api_port": "32811",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4",
"api_url": "http://169.44.38.114:32811"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32820",
"api_host": "169.44.38.102",
"api_port": "32821",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5",
"api_url": "http://169.44.38.102:32821"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32818",
"api_host": "169.44.38.102",
"api_port": "32819",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2",
"api_url": "http://169.44.38.102:32819"
}
];
console.log('loading hardcoded peers');
if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found...
var servicesObject = JSON.parse(process.env.VCAP_SERVICES);
for(var i in servicesObject){
if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough
if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){
console.log('overwriting peers, loading from a vcap service: ', i);
peers = servicesObject[i][0].credentials.peers;
break;
}
}
}
}
obc.network(peers); //setup network connection for rest endpoint
// ==================================
// configure obc-js sdk
// ==================================
var options = {
zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip',
git_dir: 'simplestuff-master/phase2', //subdirectory name of chaincode after unzipped
git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url
//hashed cc name from prev deployment
deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93'
};
if(process.env.VCAP_SERVICES){
console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n');
options.deployed_name = "";
}
obc.load(options, cb_ready); //parse/load chaincode
function cb_ready(err, cc){ //response has chaincode functions
app1.setup(obc, cc);
app2.setup(obc, cc);
if(cc.details.deployed_name === ""){ //decide if i need to deploy
cc.deploy('init', ['99'], './cc_summaries', cb_deployed);
}
else{
console.log('chaincode summary file indicates chaincode has been previously deployed');
cb_deployed();
}
}
// ============================================================================================================================
// WebSocket Communication Madness
// ============================================================================================================================
function cb_deployed() | {
console.log('starting websocket');
obc.save('./cc_summaries'); //save it here for chaincode investigator
wss = new ws.Server({server: server}); //start the websocket now
wss.on('connection', function connection(ws) {
//ws_cons.push(ws);
ws.on('message', function incoming(message) {
console.log('received ws msg:', message);
var data = JSON.parse(message);
app1.process_msg(ws, data);
app2.process_msg(ws, data);
});
ws.on('close', function(){
app2.close(); //close periodic poll that phase 2 does
});
});
} | identifier_body |
|
app.js | '));
app.set('view engine', 'jade');
app.engine('.html', require('jade').__express);
app.use(compression());
app.use(morgan('dev'));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded());
app.use(cookieParser());
app.use('/cc/summary', serve_static(path.join(__dirname, 'cc_summaries')) ); //for chaincode investigator
//app.use( serve_static(path.join(__dirname, 'public'), {maxAge: '1d', setHeaders: setCustomCC}) ); //1 day cache
app.use( serve_static(path.join(__dirname, 'public')) );
app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true}));
function setCustomCC(res, path) {
if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache
else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, max-age=2592000');
else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000');
}
// Enable CORS preflight across the board.
app.options('*', cors());
app.use(cors());
/////////// Configure Webserver ///////////
app.use(function(req, res, next){
var keys;
console.log('silly', '------------------------------------------ incoming request ------------------------------------------');
console.log('info', 'New ' + req.method + ' request for', req.url);
req.bag = {}; //create my object for my stuff
req.session.count = eval(req.session.count) + 1;
req.bag.session = req.session;
var url_parts = url.parse(req.url, true);
req.parameters = url_parts.query;
keys = Object.keys(req.parameters);
if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters
keys = Object.keys(req.body);
if (req.body && keys.length > 0) console.log({body: req.body}); //print request body
next();
});
//// Router ////
app.use('/', require('./routes/site_router'));
////////////////////////////////////////////
////////////// Error Handling //////////////
////////////////////////////////////////////
app.use(function(req, res, next) {
var err = new Error('Not Found');
err.status = 404;
next(err);
});
app.use(function(err, req, res, next) { // = development error handler, print stack trace
console.log("Error Handeler -", req.url);
var errorCode = err.status || 500;
res.status(errorCode);
req.bag.error = {msg:err.stack, status:errorCode};
if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file";
res.render('template/error', {bag:req.bag});
});
// ============================================================================================================================
// Launch Webserver
// ============================================================================================================================
var server = http.createServer(app).listen(port, function() {});
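// plain HTTP server; a WebSocket server is later attached to this same server object (in cb_deployed)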
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";
server.timeout = 240000; // Ta-da.
console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------');
if(process.env.PRODUCTION) console.log('Running using Production settings');
else console.log('Running using Developer settings');
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// Warning
// ============================================================================================================================
// ============================================================================================================================
// Entering
// ============================================================================================================================
// ============================================================================================================================
// Test Area
// ============================================================================================================================
var app1 = require('./utils/ws_app1');
var app2 = require('./utils/ws_app2');
var ws = require('ws');
var wss = {};
var Obc1 = require('./utils/obc-js/index');
var obc = new Obc1();
// ==================================
// load peers manually or from VCAP, VCAP will overwrite hardcoded list!
// ==================================
var peers = [
{
"discovery_host": "169.44.38.124",
"discovery_port": "32826",
"api_host": "169.44.38.124",
"api_port": "32827",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1",
"api_url": "http://169.44.38.124:32827"
},
{
"discovery_host": "169.44.38.120",
"discovery_port": "32804",
"api_host": "169.44.38.120",
"api_port": "32805",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3",
"api_url": "http://169.44.38.120:32805"
},
{
"discovery_host": "169.44.38.114",
"discovery_port": "32810",
"api_host": "169.44.38.114",
"api_port": "32811",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4",
"api_url": "http://169.44.38.114:32811"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32820",
"api_host": "169.44.38.102",
"api_port": "32821",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5",
"api_url": "http://169.44.38.102:32821"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32818",
"api_host": "169.44.38.102",
"api_port": "32819",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2",
"api_url": "http://169.44.38.102:32819"
}
];
console.log('loading hardcoded peers');
if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found...
var servicesObject = JSON.parse(process.env.VCAP_SERVICES);
for(var i in servicesObject){
if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough
if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){
console.log('overwriting peers, loading from a vcap service: ', i);
peers = servicesObject[i][0].credentials.peers;
break;
}
}
}
}
obc.network(peers); //setup network connection for rest endpoint
// ==================================
// configure obc-js sdk
// ==================================
var options = {
zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip',
git_dir: 'simplestuff-master/phase2', //subdirectory name of chaincode after unzipped
git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url
//hashed cc name from prev deployment
deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93'
};
if(process.env.VCAP_SERVICES) |
obc.load(options, cb_ready); //parse/load chain | {
console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n');
options.deployed_name = "";
} | conditional_block |
app.js | serve_static(path.join(__dirname, 'public')) );
app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true}));
function setCustomCC(res, path) {
if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache
else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, max-age=2592000');
else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000');
}
// Enable CORS preflight across the board.
app.options('*', cors());
app.use(cors());
/////////// Configure Webserver ///////////
app.use(function(req, res, next){
var keys;
console.log('silly', '------------------------------------------ incoming request ------------------------------------------');
console.log('info', 'New ' + req.method + ' request for', req.url);
req.bag = {}; //create my object for my stuff
req.session.count = eval(req.session.count) + 1;
req.bag.session = req.session;
var url_parts = url.parse(req.url, true);
req.parameters = url_parts.query;
keys = Object.keys(req.parameters);
if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters
keys = Object.keys(req.body);
if (req.body && keys.length > 0) console.log({body: req.body}); //print request body
next();
});
//// Router ////
app.use('/', require('./routes/site_router'));
////////////////////////////////////////////
////////////// Error Handling //////////////
////////////////////////////////////////////
app.use(function(req, res, next) {
var err = new Error('Not Found');
err.status = 404;
next(err);
});
app.use(function(err, req, res, next) { // = development error handler, print stack trace
console.log("Error Handeler -", req.url);
var errorCode = err.status || 500;
res.status(errorCode);
req.bag.error = {msg:err.stack, status:errorCode};
if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file";
res.render('template/error', {bag:req.bag});
});
// ============================================================================================================================
// Launch Webserver
// ============================================================================================================================
var server = http.createServer(app).listen(port, function() {});
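// plain HTTP server; a WebSocket server is later attached to this same server object (in cb_deployed)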
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";
server.timeout = 240000; // Ta-da.
console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------');
if(process.env.PRODUCTION) console.log('Running using Production settings');
else console.log('Running using Developer settings');
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// Warning
// ============================================================================================================================
// ============================================================================================================================
// Entering
// ============================================================================================================================
// ============================================================================================================================
// Test Area
// ============================================================================================================================
var app1 = require('./utils/ws_app1');
var app2 = require('./utils/ws_app2');
var ws = require('ws');
var wss = {};
var Obc1 = require('./utils/obc-js/index');
var obc = new Obc1();
// ==================================
// load peers manually or from VCAP, VCAP will overwrite hardcoded list!
// ==================================
var peers = [
{
"discovery_host": "169.44.38.124",
"discovery_port": "32826",
"api_host": "169.44.38.124",
"api_port": "32827",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1",
"api_url": "http://169.44.38.124:32827"
},
{
"discovery_host": "169.44.38.120",
"discovery_port": "32804",
"api_host": "169.44.38.120",
"api_port": "32805",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3",
"api_url": "http://169.44.38.120:32805"
},
{
"discovery_host": "169.44.38.114",
"discovery_port": "32810",
"api_host": "169.44.38.114",
"api_port": "32811",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4",
"api_url": "http://169.44.38.114:32811"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32820",
"api_host": "169.44.38.102",
"api_port": "32821",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5",
"api_url": "http://169.44.38.102:32821"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32818",
"api_host": "169.44.38.102",
"api_port": "32819",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2",
"api_url": "http://169.44.38.102:32819"
}
];
console.log('loading hardcoded peers');
if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found...
var servicesObject = JSON.parse(process.env.VCAP_SERVICES);
for(var i in servicesObject){
if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough
if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){
console.log('overwriting peers, loading from a vcap service: ', i);
peers = servicesObject[i][0].credentials.peers;
break;
}
}
}
}
obc.network(peers); //setup network connection for rest endpoint
// ==================================
// configure obc-js sdk
// ==================================
var options = {
zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip',
git_dir: 'simplestuff-master/phase2', //subdirectory name of chaincode after unzipped
git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url
//hashed cc name from prev deployment
deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93'
};
if(process.env.VCAP_SERVICES){
console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n');
options.deployed_name = "";
}
obc.load(options, cb_ready); //parse/load chaincode
function cb_ready(err, cc){ //response has chaincode functions
app1.setup(obc, cc);
app2.setup(obc, cc);
if(cc.details.deployed_name === ""){ //decide if i need to deploy
cc.deploy('init', ['99'], './cc_summaries', cb_deployed);
}
else{
console.log('chaincode summary file indicates chaincode has been previously deployed');
cb_deployed();
}
}
// ============================================================================================================================
// WebSocket Communication Madness
// ============================================================================================================================
function | cb_deployed | identifier_name |
|
app.js | '));
app.set('view engine', 'jade');
app.engine('.html', require('jade').__express);
app.use(compression());
app.use(morgan('dev'));
app.use(bodyParser.json());
app.use(bodyParser.urlencoded());
app.use(cookieParser());
app.use('/cc/summary', serve_static(path.join(__dirname, 'cc_summaries')) ); //for chaincode investigator
//app.use( serve_static(path.join(__dirname, 'public'), {maxAge: '1d', setHeaders: setCustomCC}) ); //1 day cache | else if (serve_static.mime.lookup(path) === 'image/x-icon') res.setHeader('Cache-Control', 'public, max-age=2592000');
}
// Enable CORS preflight across the board.
app.options('*', cors());
app.use(cors());
/////////// Configure Webserver ///////////
app.use(function(req, res, next){
var keys;
console.log('silly', '------------------------------------------ incoming request ------------------------------------------');
console.log('info', 'New ' + req.method + ' request for', req.url);
req.bag = {}; //create my object for my stuff
req.session.count = eval(req.session.count) + 1;
req.bag.session = req.session;
var url_parts = url.parse(req.url, true);
req.parameters = url_parts.query;
keys = Object.keys(req.parameters);
if(req.parameters && keys.length > 0) console.log({parameters: req.parameters}); //print request parameters
keys = Object.keys(req.body);
if (req.body && keys.length > 0) console.log({body: req.body}); //print request body
next();
});
//// Router ////
app.use('/', require('./routes/site_router'));
////////////////////////////////////////////
////////////// Error Handling //////////////
////////////////////////////////////////////
app.use(function(req, res, next) {
var err = new Error('Not Found');
err.status = 404;
next(err);
});
app.use(function(err, req, res, next) { // = development error handler, print stack trace
console.log("Error Handeler -", req.url);
var errorCode = err.status || 500;
res.status(errorCode);
req.bag.error = {msg:err.stack, status:errorCode};
if(req.bag.error.status == 404) req.bag.error.msg = "Sorry, I cannot locate that file";
res.render('template/error', {bag:req.bag});
});
// ============================================================================================================================
// Launch Webserver
// ============================================================================================================================
var server = http.createServer(app).listen(port, function() {});
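// plain HTTP server; a WebSocket server is later attached to this same server object (in cb_deployed)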
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";
server.timeout = 240000; // Ta-da.
console.log('info', '------------------------------------------ Server Up - ' + host + ':' + port + ' ------------------------------------------');
if(process.env.PRODUCTION) console.log('Running using Production settings');
else console.log('Running using Developer settings');
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// ============================================================================================================================
// Warning
// ============================================================================================================================
// ============================================================================================================================
// Entering
// ============================================================================================================================
// ============================================================================================================================
// Test Area
// ============================================================================================================================
var app1 = require('./utils/ws_app1');
var app2 = require('./utils/ws_app2');
var ws = require('ws');
var wss = {};
var Obc1 = require('./utils/obc-js/index');
var obc = new Obc1();
// ==================================
// load peers manually or from VCAP, VCAP will overwrite hardcoded list!
// ==================================
var peers = [
{
"discovery_host": "169.44.38.124",
"discovery_port": "32826",
"api_host": "169.44.38.124",
"api_port": "32827",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp1",
"api_url": "http://169.44.38.124:32827"
},
{
"discovery_host": "169.44.38.120",
"discovery_port": "32804",
"api_host": "169.44.38.120",
"api_port": "32805",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp3",
"api_url": "http://169.44.38.120:32805"
},
{
"discovery_host": "169.44.38.114",
"discovery_port": "32810",
"api_host": "169.44.38.114",
"api_port": "32811",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp4",
"api_url": "http://169.44.38.114:32811"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32820",
"api_host": "169.44.38.102",
"api_port": "32821",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp5",
"api_url": "http://169.44.38.102:32821"
},
{
"discovery_host": "169.44.38.102",
"discovery_port": "32818",
"api_host": "169.44.38.102",
"api_port": "32819",
"id": "41e46128-cbed-4cc9-a635-bc140226a309_vp2",
"api_url": "http://169.44.38.102:32819"
}
];
console.log('loading hardcoded peers');
if(process.env.VCAP_SERVICES){ //load from vcap, search for service, 1 of the 3 should be found...
var servicesObject = JSON.parse(process.env.VCAP_SERVICES);
for(var i in servicesObject){
if(i.indexOf('ibm-blockchain') >= 0){ //looks close enough
if(servicesObject[i][0].credentials && servicesObject[i][0].credentials.peers){
console.log('overwriting peers, loading from a vcap service: ', i);
peers = servicesObject[i][0].credentials.peers;
break;
}
}
}
}
obc.network(peers); //setup network connection for rest endpoint
// ==================================
// configure obc-js sdk
// ==================================
var options = {
zip_url: 'https://github.com/dshuffma-ibm/simplestuff/archive/master.zip',
git_dir: 'simplestuff-master/phase2', //subdirectory name of chaincode after unzipped
git_url: 'https://github.com/dshuffma-ibm/simplestuff/phase2', //git clone http url
//hashed cc name from prev deployment
deployed_name: '4a237d1e7be8bb2fe61a9f00b7200c1f9a16f77ec2dc4045a540fd84da2327a80975d66394add22961544ea07dae943a1941f175d547b554a0b5d5d2fa8d7c93'
};
if(process.env.VCAP_SERVICES){
console.log('\n[!] looks like you are in bluemix, I am going to clear out the deploy_name so that it deploys new cc.\n[!] hope that is ok budddy\n');
options.deployed_name = "";
}
obc.load(options, cb_ready); //parse/load chaincode
function | app.use( serve_static(path.join(__dirname, 'public')) );
app.use(session({secret:'Somethignsomething1234!test', resave:true, saveUninitialized:true}));
function setCustomCC(res, path) {
if (serve_static.mime.lookup(path) === 'image/jpeg') res.setHeader('Cache-Control', 'public, max-age=2592000'); //30 days cache
else if (serve_static.mime.lookup(path) === 'image/png') res.setHeader('Cache-Control', 'public, max-age=2592000'); | random_line_split |
process_packet.rs | _packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination() != 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if !self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN) != 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST) != 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a client is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
// ...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[2] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn try_establish_tapdance(&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
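// a valid tag must be long enough to hold: flags, master key, server random, client random, connection id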
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else |
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov | { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
} | conditional_block |
process_packet.rs | 4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination() != 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if !self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN) != 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST) != 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a client is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
// ...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[2] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn try_establish_tapdance(&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
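// a valid tag must be long enough to hold: flags, master key, server random, client random, connection id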
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
}
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
| return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov_err | {
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol); | identifier_body |
process_packet.rs | },
EtherTypes::Ipv4 => ð_payload[0..],
_ => return,
};
match Ipv4Packet::new(ip_data) {
Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len),
None => return,
}
}
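// A TLS record header is 5 bytes (type, version, length); record type 0x17 marks application data.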
fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool
{
let payload = tcp_pkt.payload();
payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA
}
impl PerCoreGlobal
{
// frame_len is supposed to be the length of the whole Ethernet frame. We're
// only passing it here for plumbing reasons, and just for stat reporting.
fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination() != 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if !self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN) != 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST) != 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a client is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
// ...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[1] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
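// Concretely, the four bytes below match the tun "packet information" prefix:
// a 16-bit flags field followed by the EtherType in network byte order,
// 0x0800 for IPv4.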
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn try_establish_tapdance(&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
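// wscale_and_mss is recovered from the SYN recorded for this flow, so the
// forged socket can mirror the window scale and MSS the client actually
// negotiated with the decoy.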
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
}
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new | // + (eth_payload[1] as u16);
&eth_payload[4..]
} else {
return
} | random_line_split |
|
process_packet.rs | _packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol() != IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination() != 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN) != 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if !self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if !self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN) != 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN) != 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST) != 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a client is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if !self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
// ...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[1] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
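// Concretely, the four bytes below match the tun "packet information" prefix:
// a 16-bit flags field followed by the EtherType in network byte order,
// 0x0800 for IPv4.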
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn | (&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
}
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov_err | try_establish_tapdance | identifier_name |
shard.go | FlushTimer = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "shard_memory_database_flush_duration",
Help: "Flush memory data duration(ms).",
Buckets: monitoring.DefaultHistogramBuckets,
},
[]string{"db", "shard"},
)
)
func init() {
monitoring.StorageRegistry.MustRegister(buildIndexTimer)
monitoring.StorageRegistry.MustRegister(writeMetricTimer)
monitoring.StorageRegistry.MustRegister(memFlushTimer)
}
const (
replicaDir = "replica"
segmentDir = "segment"
indexParentDir = "index"
forwardIndexDir = "forward"
invertedIndexDir = "inverted"
metaDir = "meta"
tempDir = "temp"
)
// Shard is a horizontal partition of metrics for LinDB.
type Shard interface {
// DatabaseName returns the database name
DatabaseName() string
// ShardID returns the shard id
ShardID() int32
// ShardInfo returns the unique shard info
ShardInfo() string
// GetDataFamilies returns data family list by interval type and time range, return nil if not match
GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily
// MemoryDatabase returns memory database by given family time.
MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error)
// IndexDatabase returns the index-database
IndexDatabase() indexdb.IndexDatabase
// Write writes the metric-point into memory-database.
Write(metric *pb.Metric) error
// GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence
GetOrCreateSequence(replicaPeer string) (replication.Sequence, error)
// Close releases shard's resource, such as flush data, spawned goroutines etc.
io.Closer
// Flush flushes index and memory data to disk
Flush() error
// NeedFlush checks if shard need to flush memory data
NeedFlush() bool
// IsFlushing checks if this shard is in flushing
IsFlushing() bool
// initIndexDatabase initializes index database
initIndexDatabase() error
}
// shard implements Shard interface
// directory tree:
// xx/shard/1/ (path)
// xx/shard/1/replica
// xx/shard/1/temp/123213123131 // time of ns
// xx/shard/1/meta/
// xx/shard/1/index/inverted/
// xx/shard/1/data/20191012/
// xx/shard/1/data/20191013/
type shard struct {
databaseName string
id int32
path string
option option.DatabaseOption
sequence ReplicaSequence
families map[int64]memdb.MemoryDatabase // memory database for each family time
indexDB indexdb.IndexDatabase
metadata metadb.Metadata
// write accept time range
interval timeutil.Interval
ahead timeutil.Interval
behind timeutil.Interval
// segments keeps all interval segments,
// includes one smallest interval segment for writing data, and rollup interval segments
segments map[timeutil.IntervalType]IntervalSegment
segment IntervalSegment // smallest interval for writing data
isFlushing atomic.Bool // restrict flusher concurrency
flushCondition sync.WaitGroup // flush condition
indexStore kv.Store // kv stores
forwardFamily kv.Family // forward store
invertedFamily kv.Family // inverted store
rwMutex sync.RWMutex
buildIndexTimer prometheus.Observer
writeMetricTimer prometheus.Observer
memFlushTimer prometheus.Observer
}
// newShard creates shard instance, if shard path exist then load shard data for init.
// return error if fail.
func newShard(
db Database,
shardID int32,
shardPath string,
option option.DatabaseOption,
) (Shard, error) {
var err error
if err = option.Validate(); err != nil {
return nil, fmt.Errorf("engine option is invalid, err: %s", err)
}
var interval timeutil.Interval
_ = interval.ValueOf(option.Interval)
if err := mkDirIfNotExist(shardPath); err != nil {
return nil, err
}
replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir))
if err != nil {
return nil, err
}
shardIDStr := strconv.Itoa(int(shardID))
createdShard := &shard{
databaseName: db.Name(),
id: shardID,
path: shardPath,
option: option,
sequence: replicaSequence,
families: make(map[int64]memdb.MemoryDatabase),
metadata: db.Metadata(),
interval: interval,
segments: make(map[timeutil.IntervalType]IntervalSegment),
isFlushing: *atomic.NewBool(false),
buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr),
writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr),
memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr),
}
// new segment for writing
createdShard.segment, err = newIntervalSegmentFunc(
interval,
filepath.Join(shardPath, segmentDir, interval.Type().String()))
if err != nil {
return nil, err
}
_ = createdShard.ahead.ValueOf(option.Ahead)
_ = createdShard.behind.ValueOf(option.Behind)
// add writing segment into segment list
createdShard.segments[interval.Type()] = createdShard.segment
defer func() {
if err != nil {
if err := createdShard.Close(); err != nil {
engineLogger.Error("close shard error when create shard fail",
logger.String("shard", createdShard.path), logger.Error(err))
}
}
}()
if err = createdShard.initIndexDatabase(); err != nil {
return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err)
}
// add shard into global shard manager
GetShardManager().AddShard(createdShard)
return createdShard, nil
}
// DatabaseName returns the database name
func (s *shard) DatabaseName() string {
return s.databaseName
}
// ShardID returns the shard id
func (s *shard) ShardID() int32 {
return s.id
}
// ShardInfo returns the unique shard info
func (s *shard) ShardInfo() string {
return s.path
}
func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) {
return s.sequence.getOrCreateSequence(replicaPeer)
}
func (s *shard) IndexDatabase() indexdb.IndexDatabase {
return s.indexDB
}
func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily {
segment, ok := s.segments[intervalType]
if ok {
return segment.getDataFamilies(timeRange)
}
return nil
}
// MemoryDatabase returns memory database by given family time.
func (s *shard) | (familyTime int64) (memdb.MemoryDatabase, error) {
var memDB memdb.MemoryDatabase
// use the write lock: a missing family database may be created and registered below
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
memDB, ok := s.families[familyTime]
if !ok {
var err error
// assign to the outer memDB; ":=" here would shadow it and the caller would get nil
memDB, err = s.createMemoryDatabase()
if err != nil {
return nil, err
}
s.families[familyTime] = memDB
}
return memDB, nil
}
// Write writes the metric-point into memory-database.
func (s *shard) Write(metric *pb.Metric) (err error) {
if metric == nil {
return constants.ErrNilMetric
}
if len(metric.Name) == 0 {
return constants.ErrEmptyMetricName
}
if len(metric.Fields) == 0 {
return constants.ErrEmptyField
}
timestamp := metric.Timestamp
now := timeutil.Now()
// check metric timestamp if in acceptable time range
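// points outside [now-behind, now+ahead] are dropped silently (nil, not an error)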
if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) ||
(s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) {
return nil
}
ns := metric.Namespace
if len(ns) == 0 {
ns = constants.DefaultNamespace
}
metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name)
if err != nil {
return err
}
var seriesID uint32
isCreated := false
if len(metric.Tags) == 0 {
// if metric without tags, uses default series id(0)
seriesID = constants.SeriesIDWithoutTags
} else {
seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash)
if err != nil {
return err
}
}
if isCreated {
// if series id is new, need build inverted index
s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID)
| MemoryDatabase | identifier_name |
shard.go | FlushTimer = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "shard_memory_database_flush_duration",
Help: "Flush memory data duration(ms).",
Buckets: monitoring.DefaultHistogramBuckets,
},
[]string{"db", "shard"},
)
)
func init() {
monitoring.StorageRegistry.MustRegister(buildIndexTimer)
monitoring.StorageRegistry.MustRegister(writeMetricTimer)
monitoring.StorageRegistry.MustRegister(memFlushTimer)
}
const (
replicaDir = "replica"
segmentDir = "segment"
indexParentDir = "index"
forwardIndexDir = "forward"
invertedIndexDir = "inverted"
metaDir = "meta"
tempDir = "temp"
)
// Shard is a horizontal partition of metrics for LinDB.
type Shard interface {
// DatabaseName returns the database name
DatabaseName() string
// ShardID returns the shard id
ShardID() int32
// ShardInfo returns the unique shard info
ShardInfo() string
// GetDataFamilies returns data family list by interval type and time range, return nil if not match
GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily
// MemoryDatabase returns memory database by given family time.
MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error)
// IndexDatabase returns the index-database
IndexDatabase() indexdb.IndexDatabase
// Write writes the metric-point into memory-database.
Write(metric *pb.Metric) error
// GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence
GetOrCreateSequence(replicaPeer string) (replication.Sequence, error)
// Close releases shard's resource, such as flush data, spawned goroutines etc.
io.Closer
// Flush flushes index and memory data to disk
Flush() error
// NeedFlush checks if shard need to flush memory data
NeedFlush() bool
// IsFlushing checks if this shard is in flushing
IsFlushing() bool
// initIndexDatabase initializes index database
initIndexDatabase() error
}
// shard implements Shard interface
// directory tree:
// xx/shard/1/ (path)
// xx/shard/1/replica
// xx/shard/1/temp/123213123131 // time of ns
// xx/shard/1/meta/ | databaseName string
id int32
path string
option option.DatabaseOption
sequence ReplicaSequence
families map[int64]memdb.MemoryDatabase // memory database for each family time
indexDB indexdb.IndexDatabase
metadata metadb.Metadata
// write accept time range
interval timeutil.Interval
ahead timeutil.Interval
behind timeutil.Interval
// segments keeps all interval segments,
// includes one smallest interval segment for writing data, and rollup interval segments
segments map[timeutil.IntervalType]IntervalSegment
segment IntervalSegment // smallest interval for writing data
isFlushing atomic.Bool // restrict flusher concurrency
flushCondition sync.WaitGroup // flush condition
indexStore kv.Store // kv stores
forwardFamily kv.Family // forward store
invertedFamily kv.Family // inverted store
rwMutex sync.RWMutex
buildIndexTimer prometheus.Observer
writeMetricTimer prometheus.Observer
memFlushTimer prometheus.Observer
}
// newShard creates shard instance, if shard path exist then load shard data for init.
// return error if fail.
func newShard(
db Database,
shardID int32,
shardPath string,
option option.DatabaseOption,
) (Shard, error) {
var err error
if err = option.Validate(); err != nil {
return nil, fmt.Errorf("engine option is invalid, err: %s", err)
}
var interval timeutil.Interval
_ = interval.ValueOf(option.Interval)
if err := mkDirIfNotExist(shardPath); err != nil {
return nil, err
}
replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir))
if err != nil {
return nil, err
}
shardIDStr := strconv.Itoa(int(shardID))
createdShard := &shard{
databaseName: db.Name(),
id: shardID,
path: shardPath,
option: option,
sequence: replicaSequence,
families: make(map[int64]memdb.MemoryDatabase),
metadata: db.Metadata(),
interval: interval,
segments: make(map[timeutil.IntervalType]IntervalSegment),
isFlushing: *atomic.NewBool(false),
buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr),
writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr),
memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr),
}
// new segment for writing
createdShard.segment, err = newIntervalSegmentFunc(
interval,
filepath.Join(shardPath, segmentDir, interval.Type().String()))
if err != nil {
return nil, err
}
_ = createdShard.ahead.ValueOf(option.Ahead)
_ = createdShard.behind.ValueOf(option.Behind)
// add writing segment into segment list
createdShard.segments[interval.Type()] = createdShard.segment
defer func() {
if err != nil {
if err := createdShard.Close(); err != nil {
engineLogger.Error("close shard error when create shard fail",
logger.String("shard", createdShard.path), logger.Error(err))
}
}
}()
if err = createdShard.initIndexDatabase(); err != nil {
return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err)
}
// add shard into global shard manager
GetShardManager().AddShard(createdShard)
return createdShard, nil
}
// DatabaseName returns the database name
func (s *shard) DatabaseName() string {
return s.databaseName
}
// ShardID returns the shard id
func (s *shard) ShardID() int32 {
return s.id
}
// ShardInfo returns the unique shard info
func (s *shard) ShardInfo() string {
return s.path
}
func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) {
return s.sequence.getOrCreateSequence(replicaPeer)
}
func (s *shard) IndexDatabase() indexdb.IndexDatabase {
return s.indexDB
}
func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily {
segment, ok := s.segments[intervalType]
if ok {
return segment.getDataFamilies(timeRange)
}
return nil
}
// MemoryDatabase returns memory database by given family time.
func (s *shard) MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) {
var memDB memdb.MemoryDatabase
// use the write lock: a missing family database may be created and registered below
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
memDB, ok := s.families[familyTime]
if !ok {
var err error
// assign to the outer memDB; ":=" here would shadow it and the caller would get nil
memDB, err = s.createMemoryDatabase()
if err != nil {
return nil, err
}
s.families[familyTime] = memDB
}
return memDB, nil
}
// Write writes the metric-point into memory-database.
func (s *shard) Write(metric *pb.Metric) (err error) {
if metric == nil {
return constants.ErrNilMetric
}
if len(metric.Name) == 0 {
return constants.ErrEmptyMetricName
}
if len(metric.Fields) == 0 {
return constants.ErrEmptyField
}
timestamp := metric.Timestamp
now := timeutil.Now()
// check metric timestamp if in acceptable time range
if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) ||
(s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) {
return nil
}
ns := metric.Namespace
if len(ns) == 0 {
ns = constants.DefaultNamespace
}
metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name)
if err != nil {
return err
}
var seriesID uint32
isCreated := false
if len(metric.Tags) == 0 {
// if metric without tags, uses default series id(0)
seriesID = constants.SeriesIDWithoutTags
} else {
seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash)
if err != nil {
return err
}
}
if isCreated {
// if series id is new, need build inverted index
s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID)
}
| // xx/shard/1/index/inverted/
// xx/shard/1/data/20191012/
// xx/shard/1/data/20191013/
type shard struct { | random_line_split |
shard.go | util.Interval
_ = interval.ValueOf(option.Interval)
if err := mkDirIfNotExist(shardPath); err != nil {
return nil, err
}
replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir))
if err != nil {
return nil, err
}
shardIDStr := strconv.Itoa(int(shardID))
createdShard := &shard{
databaseName: db.Name(),
id: shardID,
path: shardPath,
option: option,
sequence: replicaSequence,
families: make(map[int64]memdb.MemoryDatabase),
metadata: db.Metadata(),
interval: interval,
segments: make(map[timeutil.IntervalType]IntervalSegment),
isFlushing: *atomic.NewBool(false),
buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr),
writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr),
memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr),
}
// new segment for writing
createdShard.segment, err = newIntervalSegmentFunc(
interval,
filepath.Join(shardPath, segmentDir, interval.Type().String()))
if err != nil {
return nil, err
}
_ = createdShard.ahead.ValueOf(option.Ahead)
_ = createdShard.behind.ValueOf(option.Behind)
// add writing segment into segment list
createdShard.segments[interval.Type()] = createdShard.segment
defer func() {
if err != nil {
if err := createdShard.Close(); err != nil {
engineLogger.Error("close shard error when create shard fail",
logger.String("shard", createdShard.path), logger.Error(err))
}
}
}()
if err = createdShard.initIndexDatabase(); err != nil {
return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err)
}
// add shard into global shard manager
GetShardManager().AddShard(createdShard)
return createdShard, nil
}
// DatabaseName returns the database name
func (s *shard) DatabaseName() string {
return s.databaseName
}
// ShardID returns the shard id
func (s *shard) ShardID() int32 {
return s.id
}
// ShardInfo returns the unique shard info
func (s *shard) ShardInfo() string {
return s.path
}
func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) {
return s.sequence.getOrCreateSequence(replicaPeer)
}
func (s *shard) IndexDatabase() indexdb.IndexDatabase {
return s.indexDB
}
func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily {
segment, ok := s.segments[intervalType]
if ok {
return segment.getDataFamilies(timeRange)
}
return nil
}
// MemoryDatabase returns memory database by given family time.
func (s *shard) MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) {
var memDB memdb.MemoryDatabase
// use the write lock: a missing family database may be created and registered below
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
memDB, ok := s.families[familyTime]
if !ok {
var err error
// assign to the outer memDB; ":=" here would shadow it and the caller would get nil
memDB, err = s.createMemoryDatabase()
if err != nil {
return nil, err
}
s.families[familyTime] = memDB
}
return memDB, nil
}
// Write writes the metric-point into memory-database.
func (s *shard) Write(metric *pb.Metric) (err error) {
if metric == nil {
return constants.ErrNilMetric
}
if len(metric.Name) == 0 {
return constants.ErrEmptyMetricName
}
if len(metric.Fields) == 0 {
return constants.ErrEmptyField
}
timestamp := metric.Timestamp
now := timeutil.Now()
// check metric timestamp if in acceptable time range
if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) ||
(s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) {
return nil
}
ns := metric.Namespace
if len(ns) == 0 {
ns = constants.DefaultNamespace
}
metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name)
if err != nil {
return err
}
var seriesID uint32
isCreated := false
if len(metric.Tags) == 0 {
// if metric without tags, uses default series id(0)
seriesID = constants.SeriesIDWithoutTags
} else {
seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash)
if err != nil {
return err
}
}
if isCreated {
// if series id is new, need build inverted index
s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID)
}
buildIndexEnd := timeutil.Now()
s.buildIndexTimer.Observe(float64(buildIndexEnd - now))
// calculate family start time and slot index
intervalCalc := s.interval.Calculator()
segmentTime := intervalCalc.CalcSegmentTime(timestamp) // day
family := intervalCalc.CalcFamily(timestamp, segmentTime) // hours
familyTime := intervalCalc.CalcFamilyStartTime(segmentTime, family) // family timestamp
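// i.e. segmentTime identifies the day, familyTime the hour-level bucket inside it,
// and slotIndex (below) is the interval-sized offset of this point within the family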
db, err := s.MemoryDatabase(familyTime)
if err != nil {
return err
}
// mark writing data
db.AcquireWrite()
// set write completed
defer func() {
db.CompleteWrite()
s.writeMetricTimer.Observe(float64(timeutil.Now() - buildIndexEnd))
}()
slotIndex := uint16(intervalCalc.CalcSlot(timestamp, familyTime, s.interval.Int64())) // slot offset of family
// write metric point into memory db
return db.Write(ns, metric.Name, metricID, seriesID, slotIndex, metric.Fields)
}
func (s *shard) Close() error {
// wait previous flush job completed
s.flushCondition.Wait()
GetShardManager().RemoveShard(s)
if s.indexDB != nil {
if err := s.indexDB.Close(); err != nil {
return err
}
}
if s.indexStore != nil {
if err := s.indexStore.Close(); err != nil {
return err
}
}
for _, family := range s.families {
if err := s.flushMemoryDatabase(family); err != nil {
return err
}
}
s.ackReplicaSeq()
return s.sequence.Close()
}
// IsFlushing checks if this shard is in flushing
func (s *shard) IsFlushing() bool { return s.isFlushing.Load() }
// NeedFlush checks if shard need to flush memory data
func (s *shard) NeedFlush() bool {
if s.IsFlushing() {
return false
}
for _, memDB := range s.families {
//TODO add time threshold???
return memDB.MemSize() > constants.ShardMemoryUsedThreshold
}
return false
}
// Flush flushes index and memory data to disk
func (s *shard) Flush() (err error) {
// another flush process is running
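// CAS(false, true) checks and claims the flag atomically, so at most one flusher can proceed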
if !s.isFlushing.CAS(false, true) {
return nil
}
// 1. mark flush job doing
s.flushCondition.Add(1)
defer func() {
//TODO add commit kv meta after ack successfully
// mark flush job complete, notify
s.flushCondition.Done()
s.isFlushing.Store(false)
}()
//FIXME stone1100
// index flush
if s.indexDB != nil {
if err = s.indexDB.Flush(); err != nil {
return err
}
}
// flush memory database if need flush
for _, memDB := range s.families {
//TODO add time threshold???
if memDB.MemSize() > constants.ShardMemoryUsedThreshold {
if err := s.flushMemoryDatabase(memDB); err != nil {
return err
}
}
}
//FIXME(stone1100) need remove memory database if long time no data
// finally, commit replica sequence
s.ackReplicaSeq()
return nil
}
// initIndexDatabase initializes the index database
func (s *shard) initIndexDatabase() error | {
var err error
storeOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir))
s.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption)
if err != nil {
return err
}
s.forwardFamily, err = s.indexStore.CreateFamily(
forwardIndexDir,
kv.FamilyOption{
CompactThreshold: 0,
Merger: string(invertedindex.SeriesForwardMerger)})
if err != nil {
return err
}
s.invertedFamily, err = s.indexStore.CreateFamily(
invertedIndexDir,
kv.FamilyOption{
CompactThreshold: 0,
Merger: string(invertedindex.SeriesInvertedMerger)}) | identifier_body |
|
shard.go | // GetOrCreateSequence gets the replica sequence by given remote peer if exist, else creates a new sequence
GetOrCreateSequence(replicaPeer string) (replication.Sequence, error)
// Close releases shard's resource, such as flush data, spawned goroutines etc.
io.Closer
// Flush flushes index and memory data to disk
Flush() error
// NeedFlush checks if shard need to flush memory data
NeedFlush() bool
// IsFlushing checks if this shard is in flushing
IsFlushing() bool
// initIndexDatabase initializes index database
initIndexDatabase() error
}
// shard implements Shard interface
// directory tree:
// xx/shard/1/ (path)
// xx/shard/1/replica
// xx/shard/1/temp/123213123131 // time of ns
// xx/shard/1/meta/
// xx/shard/1/index/inverted/
// xx/shard/1/data/20191012/
// xx/shard/1/data/20191013/
type shard struct {
databaseName string
id int32
path string
option option.DatabaseOption
sequence ReplicaSequence
families map[int64]memdb.MemoryDatabase // memory database for each family time
indexDB indexdb.IndexDatabase
metadata metadb.Metadata
// write accept time range
interval timeutil.Interval
ahead timeutil.Interval
behind timeutil.Interval
// segments keeps all interval segments,
// includes one smallest interval segment for writing data, and rollup interval segments
segments map[timeutil.IntervalType]IntervalSegment
segment IntervalSegment // smallest interval for writing data
isFlushing atomic.Bool // restrict flusher concurrency
flushCondition sync.WaitGroup // flush condition
indexStore kv.Store // kv stores
forwardFamily kv.Family // forward store
invertedFamily kv.Family // inverted store
rwMutex sync.RWMutex
buildIndexTimer prometheus.Observer
writeMetricTimer prometheus.Observer
memFlushTimer prometheus.Observer
}
// newShard creates shard instance, if shard path exist then load shard data for init.
// return error if fail.
func newShard(
db Database,
shardID int32,
shardPath string,
option option.DatabaseOption,
) (Shard, error) {
var err error
if err = option.Validate(); err != nil {
return nil, fmt.Errorf("engine option is invalid, err: %s", err)
}
var interval timeutil.Interval
_ = interval.ValueOf(option.Interval)
if err := mkDirIfNotExist(shardPath); err != nil {
return nil, err
}
replicaSequence, err := newReplicaSequenceFunc(filepath.Join(shardPath, replicaDir))
if err != nil {
return nil, err
}
shardIDStr := strconv.Itoa(int(shardID))
createdShard := &shard{
databaseName: db.Name(),
id: shardID,
path: shardPath,
option: option,
sequence: replicaSequence,
families: make(map[int64]memdb.MemoryDatabase),
metadata: db.Metadata(),
interval: interval,
segments: make(map[timeutil.IntervalType]IntervalSegment),
isFlushing: *atomic.NewBool(false),
buildIndexTimer: buildIndexTimer.WithLabelValues(db.Name(), shardIDStr),
writeMetricTimer: writeMetricTimer.WithLabelValues(db.Name(), shardIDStr),
memFlushTimer: memFlushTimer.WithLabelValues(db.Name(), shardIDStr),
}
// new segment for writing
createdShard.segment, err = newIntervalSegmentFunc(
interval,
filepath.Join(shardPath, segmentDir, interval.Type().String()))
if err != nil {
return nil, err
}
_ = createdShard.ahead.ValueOf(option.Ahead)
_ = createdShard.behind.ValueOf(option.Behind)
// add writing segment into segment list
createdShard.segments[interval.Type()] = createdShard.segment
defer func() {
if err != nil {
if err := createdShard.Close(); err != nil {
engineLogger.Error("close shard error when create shard fail",
logger.String("shard", createdShard.path), logger.Error(err))
}
}
}()
if err = createdShard.initIndexDatabase(); err != nil {
return nil, fmt.Errorf("create index database for shard[%d] error: %s", shardID, err)
}
// add shard into global shard manager
GetShardManager().AddShard(createdShard)
return createdShard, nil
}
// DatabaseName returns the database name
func (s *shard) DatabaseName() string {
return s.databaseName
}
// ShardID returns the shard id
func (s *shard) ShardID() int32 {
return s.id
}
// ShardInfo returns the unique shard info
func (s *shard) ShardInfo() string {
return s.path
}
func (s *shard) GetOrCreateSequence(replicaPeer string) (replication.Sequence, error) {
return s.sequence.getOrCreateSequence(replicaPeer)
}
func (s *shard) IndexDatabase() indexdb.IndexDatabase {
return s.indexDB
}
func (s *shard) GetDataFamilies(intervalType timeutil.IntervalType, timeRange timeutil.TimeRange) []DataFamily {
segment, ok := s.segments[intervalType]
if ok {
return segment.getDataFamilies(timeRange)
}
return nil
}
// MemoryDatabase returns memory database by given family time.
func (s *shard) MemoryDatabase(familyTime int64) (memdb.MemoryDatabase, error) {
var memDB memdb.MemoryDatabase
// use the write lock: a missing family database may be created and registered below
s.rwMutex.Lock()
defer s.rwMutex.Unlock()
memDB, ok := s.families[familyTime]
if !ok {
var err error
// assign to the outer memDB; ":=" here would shadow it and the caller would get nil
memDB, err = s.createMemoryDatabase()
if err != nil {
return nil, err
}
s.families[familyTime] = memDB
}
return memDB, nil
}
// Write writes the metric-point into memory-database.
func (s *shard) Write(metric *pb.Metric) (err error) {
if metric == nil {
return constants.ErrNilMetric
}
if len(metric.Name) == 0 {
return constants.ErrEmptyMetricName
}
if len(metric.Fields) == 0 {
return constants.ErrEmptyField
}
timestamp := metric.Timestamp
now := timeutil.Now()
// check metric timestamp if in acceptable time range
if (s.behind.Int64() > 0 && timestamp < now-s.behind.Int64()) ||
(s.ahead.Int64() > 0 && timestamp > now+s.ahead.Int64()) {
return nil
}
ns := metric.Namespace
if len(ns) == 0 {
ns = constants.DefaultNamespace
}
metricID, err := s.metadata.MetadataDatabase().GenMetricID(ns, metric.Name)
if err != nil {
return err
}
var seriesID uint32
isCreated := false
if len(metric.Tags) == 0 {
// if metric without tags, uses default series id(0)
seriesID = constants.SeriesIDWithoutTags
} else {
seriesID, isCreated, err = s.indexDB.GetOrCreateSeriesID(metricID, metric.TagsHash)
if err != nil {
return err
}
}
if isCreated {
// if series id is new, need build inverted index
s.indexDB.BuildInvertIndex(ns, metric.Name, metric.Tags, seriesID)
}
buildIndexEnd := timeutil.Now()
s.buildIndexTimer.Observe(float64(buildIndexEnd - now))
// calculate family start time and slot index
intervalCalc := s.interval.Calculator()
segmentTime := intervalCalc.CalcSegmentTime(timestamp) // day
family := intervalCalc.CalcFamily(timestamp, segmentTime) // hours
familyTime := intervalCalc.CalcFamilyStartTime(segmentTime, family) // family timestamp
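// i.e. segmentTime identifies the day, familyTime the hour-level bucket inside it,
// and slotIndex (below) is the interval-sized offset of this point within the family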
db, err := s.MemoryDatabase(familyTime)
if err != nil {
return err
}
// mark writing data
db.AcquireWrite()
// set write completed
defer func() {
db.CompleteWrite()
s.writeMetricTimer.Observe(float64(timeutil.Now() - buildIndexEnd))
}()
slotIndex := uint16(intervalCalc.CalcSlot(timestamp, familyTime, s.interval.Int64())) // slot offset of family
// write metric point into memory db
return db.Write(ns, metric.Name, metricID, seriesID, slotIndex, metric.Fields)
}
func (s *shard) Close() error {
// wait previous flush job completed
s.flushCondition.Wait()
GetShardManager().RemoveShard(s)
if s.indexDB != nil {
if err := s.indexDB.Close(); err != nil {
return err
}
}
if s.indexStore != nil {
if err := s.indexStore.Close(); err != nil {
return err
}
}
for _, family := range s.families {
if err := s.flushMemoryDatabase(family); err != nil | {
return err
} | conditional_block |
|
agent.rs | recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64 message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
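// When the queue is full, endpoint_record_progress's try_send fails and the
// agent is answered with SlowDown instead of blocking an HTTP worker.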
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
//
// Ignore the mutex guard (see above).
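// wait_while parks until the closure returns false, i.e. until the in-flight
// counter reaches zero; unwrap_or_else(into_inner) keeps going even if a
// panicked request poisoned the mutex.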
drop(
self.in_flight_requests
.1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g != 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct RequestGuard {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an abort'd experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
match data
.record_progress_worker
.queue
.try_send((result, auth.name))
{
Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?),
Err(crossbeam_channel::TrySendError::Full(_)) => {
data.metrics.crater_bounced_record_progress.inc_by(1);
Ok(ApiResponse::<()>::SlowDown.into_response()?)
}
Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(),
}
}
fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> {
if let Some(rev) = auth.git_revision {
data.agents.set_git_revision(&auth.name, &rev)?;
}
data.agents.record_heartbeat(&auth.name)?;
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn endpoint_error(
error: ExperimentData<HashMap<String, String>>,
mutex: Arc<Mutex<Data>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
log::error!(
"agent {} failed while running {}: {:?}",
auth.name,
error.experiment_name,
error.data.get("error")
);
let data = mutex.lock().unwrap();
let ex = Experiment::get(&data.db, &error.experiment_name)?
.ok_or_else(|| err_msg("no experiment run by this agent"))?;
data.metrics.record_error(&auth.name, &ex.name);
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> {
match resp {
Ok(resp) => resp,
Err(err) => ApiResponse::internal_error(err.to_string())
.into_response()
.unwrap(),
}
}
async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> | {
let error = if let Some(compat) = err.find::<Compat<HttpError>>() {
Some(*compat.get_ref())
} else if err.is_not_found() {
Some(HttpError::NotFound)
} else {
None
};
match error {
Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()),
Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()),
None => Err(err),
}
} | identifier_body |
|
agent.rs | Error = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
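// The remaining endpoints follow the same shape: POST + path + optional JSON
// body + shared state + agent-token auth, with failures surfaced as
// Rejections that handle_errors maps to not-found / unauthorized responses.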
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64 message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
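// Do the (potentially slow) database work only while no HTTP handlers are
// mid-request, so request latency is not affected.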
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
// | .1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g != 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct RequestGuard {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an abort'd experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
) | // Ignore the mutex guard (see above).
drop(
self.in_flight_requests | random_line_split |
agent.rs | = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else | ;
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64-message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
//
// Ignore the mutex guard (see above).
drop(
self.in_flight_requests
.1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g != 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct RequestGuard {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
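// Illustrative usage sketch (not part of the original source): a handler pairs
// `start_request` with the returned guard for the lifetime of the request. The
// function name below is made up for this example.
#[allow(dead_code)]
fn handle_hypothetical_request(thread: &RecordProgressThread) {
// Incrementing the in-flight counter keeps `block_until_idle` waiting.
let _guard = thread.start_request();
// ... validate and enqueue the request body here ...
// Dropping `_guard` decrements the counter and notifies the Condvar.
}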
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an aborted experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails | {
None
} | conditional_block |
agent.rs | = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
// We need to make sure that Experiment::next executes uninterrupted.
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64-message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
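// Illustrative sketch (not part of the original source): the queue above is a
// bounded crossbeam channel, so a non-blocking enqueue can shed load when the
// channel is full. The method name `try_enqueue` is made up for this example.
#[allow(dead_code)]
fn try_enqueue(&self, item: (ExperimentData<ProgressData>, String)) -> bool {
// `try_send` fails immediately with `TrySendError::Full` instead of blocking,
// which is what the "load shedding" comment in `new` refers to.
self.queue.try_send(item).is_ok()
}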
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
//
// Ignore the mutex guard (see above).
drop(
self.in_flight_requests
.1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g != 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct | {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an aborted experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
| RequestGuard | identifier_name |
message.rs | size.
const MAX_MESSAGE_SIZE: u32 = 1 << 27; // 2^27 = 134_217_728 bytes (`^` in Rust is XOR, not exponentiation)
/// A message consists of a header and a body. If you think of a message as a package,
/// the header is the address, and the body contains the package contents.
/// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data.
struct Message {
/// The message delivery system uses the header information to figure out
/// where to send the message and how to interpret it.
header: Header,
/// The body of the message is made up of zero or more arguments,
/// which are typed values, such as an integer or a byte array.
body: Body,
}
impl Message {
fn write<T>(&self, writer:T) -> Result<(), io::Error>
where T: io::Write
{
let mut writer = DbusWriter::new(writer);
match self.header.endianess_flag {
EndianessFlag::LittleEndian => {
self.header.write::<T, LittleEndian>(&mut writer)?;
self.body.write::<T, LittleEndian>(&mut writer)?;
},
EndianessFlag::BigEndian => {
self.header.write::<T, BigEndian>(&mut writer)?;
self.body.write::<T, BigEndian>(&mut writer)?;
},
};
Ok(())
}
}
/// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian.
/// Both header and body are in this endianness.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EndianessFlag {
LittleEndian,
BigEndian,
}
impl DbusWrite for EndianessFlag {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
EndianessFlag::LittleEndian => writer.write_u8(b'l'),
EndianessFlag::BigEndian => writer.write_u8(b'B'),
}
}
}
impl DbusRead<EndianessFlag> for EndianessFlag {
fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error>
where T1: io::Read,
T2: ByteOrder
{
match reader.read_u8()? {
b'l' => Ok(EndianessFlag::LittleEndian),
b'B' => Ok(EndianessFlag::BigEndian),
x => {
let str_err = format!("Invalid endianess `{}`", x);
Err(io::Error::new(io::ErrorKind::InvalidData, str_err))
},
}
}
}
/// Message type. Unknown types must be ignored.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MessageType {
/// This is an invalid type.
Invalid = 0,
/// Method call. This message type may prompt a reply.
MethodCall = 1,
/// Method reply with returned data.
MethodReturn = 2,
/// Error reply. If the first argument exists
/// and is a string, it is an error message.
Error = 3,
/// Signal emission.
Signal = 4,
}
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MajorProtocolVersion(pub u8);
impl DbusWrite for MajorProtocolVersion {
fn | <T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.0)
}
}
bitflags! {
struct HeaderFlags: u8 {
/// This message does not expect method return replies or error replies,
/// even if it is of a type that can have a reply; the reply should be omitted.
const NO_REPLY_EXPECTED = 0x1;
/// The bus must not launch an owner for the destination name in response to this message.
const NO_AUTO_START = 0x2;
/// This flag may be set on a method call message to inform the receiving side that the caller
/// is prepared to wait for interactive authorization, which might take a considerable time to complete.
const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4;
}
}
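// Illustrative sketch (not part of the original source): header flags combine
// with a bitwise OR, and unknown bits are simply ignored by receivers.
#[allow(dead_code)]
fn demo_flags() -> u8 {
let flags = HeaderFlags::NO_REPLY_EXPECTED | HeaderFlags::ALLOW_INTERACTIVE_AUTHORIZATION;
flags.bits() // 0x1 | 0x4 == 0x5
}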
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HeaderFieldCode {
/// Not a valid field name (error if it appears in a message)
Invalid = 0,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path = 1,
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface = 2,
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member = 3,
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName = 4,
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial = 5,
/// The name of the connection this message is intended for.
/// Optional.
Destination = 6,
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender = 7,
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature = 8,
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds = 9,
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
///
#[repr(u8)]
enum HeaderField {
/// Not a valid field name (error if it appears in a message)
Invalid,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path(ObjectPath),
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface(InterfaceName),
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member(MemberName),
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName(ErrorName),
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial(Serial),
/// The name of the connection this message is intended for.
/// Optional.
Destination(String),
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender(String),
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature(Signature),
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds(u32),
}
impl DbusWrite for HeaderField {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")),
HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?,
HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?,
HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?,
HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?,
HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?,
HeaderField::Destination(destination) => writer.write_string::<T2>(destination)? | write | identifier_name |
message.rs | size.
const MAX_MESSAGE_SIZE: u32 = 1 << 27; // 2^27 = 134_217_728 bytes (`^` in Rust is XOR, not exponentiation)
/// A message consists of a header and a body. If you think of a message as a package,
/// the header is the address, and the body contains the package contents.
/// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data. | /// The body of the message is made up of zero or more arguments,
/// which are typed values, such as an integer or a byte array.
body: Body,
}
impl Message {
fn write<T>(&self, writer:T) -> Result<(), io::Error>
where T: io::Write
{
let mut writer = DbusWriter::new(writer);
match self.header.endianess_flag {
EndianessFlag::LittleEndian => {
self.header.write::<T, LittleEndian>(&mut writer)?;
self.body.write::<T, LittleEndian>(&mut writer)?;
},
EndianessFlag::BigEndian => {
self.header.write::<T, BigEndian>(&mut writer)?;
self.body.write::<T, BigEndian>(&mut writer)?;
},
};
Ok(())
}
}
/// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian.
/// Both header and body are in this endianness.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EndianessFlag {
LittleEndian,
BigEndian,
}
impl DbusWrite for EndianessFlag {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
EndianessFlag::LittleEndian => writer.write_u8(b'l'),
EndianessFlag::BigEndian => writer.write_u8(b'B'),
}
}
}
impl DbusRead<EndianessFlag> for EndianessFlag {
fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error>
where T1: io::Read,
T2: ByteOrder
{
match reader.read_u8()? {
b'l' => Ok(EndianessFlag::LittleEndian),
b'B' => Ok(EndianessFlag::BigEndian),
x => {
let str_err = format!("Invalid endianess `{}`", x);
Err(io::Error::new(io::ErrorKind::InvalidData, str_err))
},
}
}
}
/// Message type. Unknown types must be ignored.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MessageType {
/// This is an invalid type.
Invalid = 0,
/// Method call. This message type may prompt a reply.
MethodCall = 1,
/// Method reply with returned data.
MethodReturn = 2,
/// Error reply. If the first argument exists
/// and is a string, it is an error message.
Error = 3,
/// Signal emission.
Signal = 4,
}
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MajorProtocolVersion(pub u8);
impl DbusWrite for MajorProtocolVersion {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.0)
}
}
bitflags! {
struct HeaderFlags: u8 {
/// This message does not expect method return replies or error replies,
/// even if it is of a type that can have a reply; the reply should be omitted.
const NO_REPLY_EXPECTED = 0x1;
/// The bus must not launch an owner for the destination name in response to this message.
const NO_AUTO_START = 0x2;
/// This flag may be set on a method call message to inform the receiving side that the caller
/// is prepared to wait for interactive authorization, which might take a considerable time to complete.
const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4;
}
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HeaderFieldCode {
/// Not a valid field name (error if it appears in a message)
Invalid = 0,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path = 1,
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface = 2,
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member = 3,
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName = 4,
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial = 5,
/// The name of the connection this message is intended for.
/// Optional.
Destination = 6,
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender = 7,
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature = 8,
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds = 9,
}
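// Illustrative sketch (not part of the original source), derived from the doc
// comments above: which header fields each message type requires.
#[allow(dead_code)]
fn required_fields(message_type: MessageType) -> &'static [HeaderFieldCode] {
match message_type {
MessageType::MethodCall => &[HeaderFieldCode::Path, HeaderFieldCode::Member],
MessageType::Signal => &[HeaderFieldCode::Path, HeaderFieldCode::Interface, HeaderFieldCode::Member],
MessageType::Error => &[HeaderFieldCode::ErrorName, HeaderFieldCode::ReplySerial],
MessageType::MethodReturn => &[HeaderFieldCode::ReplySerial],
MessageType::Invalid => &[],
}
}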
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
///
#[repr(u8)]
enum HeaderField {
/// Not a valid field name (error if it appears in a message)
Invalid,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path(ObjectPath),
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface(InterfaceName),
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member(MemberName),
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName(ErrorName),
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial(Serial),
/// The name of the connection this message is intended for.
/// Optional.
Destination(String),
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender(String),
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature(Signature),
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds(u32),
}
impl DbusWrite for HeaderField {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")),
HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?,
HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?,
HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?,
HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?,
HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?,
HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?,
| struct Message {
/// The message delivery system uses the header information to figure out
/// where to send the message and how to interpret it.
header: Header, | random_line_split |
message.rs | = format!("Invalid endianess `{}`", x);
Err(io::Error::new(io::ErrorKind::InvalidData, str_err))
},
}
}
}
/// Message type. Unknown types must be ignored.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MessageType {
/// This is an invalid type.
Invalid = 0,
/// Method call. This message type may prompt a reply.
MethodCall = 1,
/// Method reply with returned data.
MethodReturn = 2,
/// Error reply. If the first argument exists
/// and is a string, it is an error message.
Error = 3,
/// Signal emission.
Signal = 4,
}
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MajorProtocolVersion(pub u8);
impl DbusWrite for MajorProtocolVersion {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.0)
}
}
bitflags! {
struct HeaderFlags: u8 {
/// This message does not expect method return replies or error replies,
/// even if it is of a type that can have a reply; the reply should be omitted.
const NO_REPLY_EXPECTED = 0x1;
/// The bus must not launch an owner for the destination name in response to this message.
const NO_AUTO_START = 0x2;
/// This flag may be set on a method call message to inform the receiving side that the caller
/// is prepared to wait for interactive authorization, which might take a considerable time to complete.
const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4;
}
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HeaderFieldCode {
/// Not a valid field name (error if it appears in a message)
Invalid = 0,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path = 1,
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface = 2,
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member = 3,
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName = 4,
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial = 5,
/// The name of the connection this message is intended for.
/// Optional.
Destination = 6,
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender = 7,
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature = 8,
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds = 9,
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
///
#[repr(u8)]
enum HeaderField {
/// Not a valid field name (error if it appears in a message)
Invalid,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path(ObjectPath),
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface(InterfaceName),
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member(MemberName),
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName(ErrorName),
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial(Serial),
/// The name of the connection this message is intended for.
/// Optional.
Destination(String),
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender(String),
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature(Signature),
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds(u32),
}
impl DbusWrite for HeaderField {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")),
HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?,
HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?,
HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?,
HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?,
HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?,
HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?,
HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?,
HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?,
HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?,
};
Ok(())
}
}
/// The length of the header must be a multiple of 8, allowing the body to begin on
/// an 8-byte boundary when storing the entire message in a single buffer.
/// If the header does not naturally end on an 8-byte boundary up to 7 bytes of
/// nul-initialized alignment padding must be added.
/// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields
struct Header {
endianess_flag: EndianessFlag,
/// Message type. Unknown types must be ignored.
message_type: MessageType,
/// Bitwise OR of flags. Unknown flags must be ignored.
flags: HeaderFlags,
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
major_protocol_version: MajorProtocolVersion,
/// Length in bytes of the message body, starting from the end of the header.
/// The header ends after its alignment padding to an 8-boundary.
length_message_body: u32,
/// The serial of this message, used as a cookie by the sender to identify
/// the reply corresponding to this request. This must not be zero.
serial: Serial,
/// An array of zero or more header fields where the byte is the field code,
/// and the variant is the field value. The message type determines which fields are required.
header_fields: Vec<(HeaderFieldCode, HeaderField)>,
}
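// Illustrative sketch (not part of the original source): the 8-byte alignment
// rule described above means up to 7 NUL padding bytes follow the header fields.
#[allow(dead_code)]
fn header_padding_len(header_len: usize) -> usize {
// e.g. a 53-byte header needs 3 padding bytes so the body starts at offset 56
(8 - header_len % 8) % 8
}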
impl DbusWrite for Header {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
| {
writer.write_u8(self.endianess_flag as u8)?;
writer.write_u8(self.message_type as u8)?;
writer.write_u8(self.flags.bits())?;
writer.write_u8(self.major_protocol_version.0)?;
writer.write_u32::<T2>(self.length_message_body)?;
writer.write_u32::<T2>(self.serial.0)?;
for (ref code, ref field) in self.header_fields.iter().by_ref() {
writer.write_u8(code.clone() as u8)?;
field.write::<T1, T2>(writer)?;
}
Ok(())
} | identifier_body |
|
genotype.go | }
func logm(level string, msg string, verbose bool) {
if verbose || level != "DEBUG" {
fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg)
}
}
func checkResult(e error) {
if e != nil {
log.Fatal(e)
}
}
// joinAlleles takes a list of alleles and returns a comma separated list of them
func joinAlleles(alleles AlleleResult) string {
keys := make([]string, 0, len(alleles))
for k := range alleles {
keys = append(keys, k)
}
return strings.Join(keys, ",")
}
// reverse complements a single nucleotide
func reverse(in byte) byte {
result, ok := REVERSE_MAP[in]
if !ok {
log.Fatal("failed to reverse complement")
}
return result
}
var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
// reverseComplement reverses and complements a string
func reverseComplement(in *bytes.Buffer) []byte {
var result []byte = make([]byte, in.Len(), in.Len())
for pos := in.Len() - 1; pos >= 0; pos-- {
current, err := in.ReadByte()
checkResult(err)
result[pos] = reverse(current)
}
return result
}
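// Illustrative sketch (not part of the original source): the reverse complement
// of "AAC" is "GTT" -- complement each base, then reverse the order.
func exampleReverseComplement() []byte {
return reverseComplement(bytes.NewBufferString("AAC")) // []byte("GTT")
}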
// addSearchableSequence adds an allele sequence to the hash
func addSearchableSequence(content string, sequence int, db QueryList) {
// for an exact match we only have to hash the start of the query
if len(content) < db.SeedSize |
position := 0
kmer := content[position : position+db.SeedSize]
kmerHash := stringToHash(kmer)
entry := QueryPos{Name: sequence, Pos: position, Content: content}
query, ok := db.Index[kmerHash]
if !ok {
query = make([]QueryPos, 0)
}
query = append(query, entry)
db.Index[kmerHash] = query
}
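// Illustrative sketch (not part of the original source): every allele is indexed
// under the hash of its first SeedSize bases (so candidate.Pos is 0), and a seed
// hit at genome position p is confirmed by re-comparing the full allele, which is
// what searchSequence below effectively does.
func exampleVerifySeedHit(genome, allele string, p int) bool {
return p+len(allele) <= len(genome) && genome[p:p+len(allele)] == allele
}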
// searchSequence iterates over content and populates result with genes and their matching alleles
func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) {
// populates result with a map from gene name to list of found alleles
for position := 0; position <= len(content)-db.SeedSize; position += 1 {
kmer := content[position : position+db.SeedSize] // kmer at current position in content
kmerHash := stringToHash(kmer)
query, ok := db.Index[kmerHash]
if ok {
// logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose)
for _, candidate := range query {
// check for a match
if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content {
// it's a match, split the sequence name into gene and allele
geneAllele := strings.Split(db.Names[candidate.Name], "_")
alleles, ok := result[GeneName(geneAllele[0])]
if !ok {
alleles = make(AlleleResult, 0)
}
alleles[geneAllele[1]] = true
if reverseComplement {
logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose)
} else {
logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose)
}
result[GeneName(geneAllele[0])] = alleles
} else {
// logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose)
}
}
} else {
// logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose)
}
}
}
// IndexSequences generates an index from a gzipped list of alleles for genotyping
func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose)
var queryList QueryList
queryList.SeedSize = seedSize
queryList.Index = make(QueryIndex)
queryList.Cgst = CreateCGST(cgstFilename, verbose)
var content *bytes.Buffer
lines := 0
sequenceCount := 0
for _, sequenceFilename := range sequences {
logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose)
// open sequences (either .fa or .fa.gz) file for reading
file, err := os.Open(sequenceFilename)
checkResult(err)
var scanner *bufio.Scanner
if strings.HasSuffix(sequenceFilename, ".gz") {
gr, err := gzip.NewReader(file)
checkResult(err)
scanner = bufio.NewScanner(gr)
} else {
scanner = bufio.NewScanner(file)
}
// index sequences
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, ">") {
if content != nil {
addSearchableSequence(content.String(), sequenceCount-1, queryList)
}
queryList.Names = append(queryList.Names, line[1:])
sequenceCount++
content = new(bytes.Buffer)
} else {
(*content).WriteString(line)
}
lines++
if lines%1000000 == 0 {
logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false)
}
}
addSearchableSequence(content.String(), sequenceCount-1, queryList)
logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose)
file.Close()
}
logm("INFO", fmt.Sprintf("processing %v file(s): done. %v sequences", len(sequences), len(queryList.Names)), verbose)
return queryList
}
// SaveIndex writes an indexed collection of indexes for use with the genotype command
func SaveIndex(target string, source QueryList, verbose bool) {
logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose)
file, err := os.Create(target)
checkResult(err)
defer file.Close()
gr := gzip.NewWriter(file)
defer gr.Close()
encoder := gob.NewEncoder(gr)
err = encoder.Encode(source.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose)
err = encoder.Encode(source.SeedSize)
checkResult(err)
err = encoder.Encode(source.Cgst)
checkResult(err)
// save the index, but go has a size limit
indexSize := len(source.Index)
err = encoder.Encode(indexSize)
checkResult(err)
logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose)
count := 0
for key, value := range source.Index {
err = encoder.Encode(key)
checkResult(err)
err = encoder.Encode(value)
checkResult(err)
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: saved %v items", count), false)
}
}
logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose)
}
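// Illustrative usage sketch (not part of the original source); the file names
// and seed size below are made up.
func exampleBuildIndex() {
db := IndexSequences([]string{"alleles.fa.gz"}, "scheme.cgst", 16, true)
SaveIndex("alleles.idx.gz", db, true)
}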
func LoadIndex(source string, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose)
var result QueryList
// open fa.gz file
file, err := os.Open(source)
checkResult(err)
defer file.Close()
gr, err := gzip.NewReader(file)
checkResult(err)
defer gr.Close()
decoder := gob.NewDecoder(gr)
err = decoder.Decode(&result.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose)
err = decoder.Decode(&result.SeedSize)
checkResult(err)
err = decoder.Decode(&result.Cgst)
checkResult(err)
var indexSize int
err = decoder.Decode(&indexSize)
checkResult(err)
result.Index = make(QueryIndex)
count := 0
for i := 0; i < indexSize; i++ {
var key uint32
var val []QueryPos
err = decoder.Decode(&key)
checkResult(err)
err = decoder.Decode(&val)
checkResult(err)
result.Index[key] = val
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false)
}
// logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose)
}
logm("INFO", fmt.Sprintf | {
logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false)
return
} | conditional_block |
genotype.go | }
func logm(level string, msg string, verbose bool) {
if verbose || level != "DEBUG" {
fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg)
}
}
func checkResult(e error) {
if e != nil {
log.Fatal(e)
}
}
// joinAlleles takes a list of alleles and returns a comma separated list of them
func joinAlleles(alleles AlleleResult) string {
keys := make([]string, 0, len(alleles))
for k := range alleles {
keys = append(keys, k)
}
return strings.Join(keys, ",")
}
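// Illustrative sketch (not part of the original source): AlleleResult is used as
// a set keyed by allele name, so two entries join to "1,3" or "3,1" -- Go map
// iteration order is not deterministic.
func exampleJoinAlleles() string {
alleles := make(AlleleResult, 0)
alleles["1"] = true
alleles["3"] = true
return joinAlleles(alleles)
}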
// reverse complements a single nucleotide
func reverse(in byte) byte {
result, ok := REVERSE_MAP[in]
if !ok {
log.Fatal("failed to reverse complement")
}
return result
}
var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
// reverseComplement reverses and complements a string
func reverseComplement(in *bytes.Buffer) []byte {
var result []byte = make([]byte, in.Len(), in.Len())
for pos := in.Len() - 1; pos >= 0; pos-- {
current, err := in.ReadByte()
checkResult(err)
result[pos] = reverse(current)
}
return result
}
// addSearchableSequence adds an allele sequence to the hash
func addSearchableSequence(content string, sequence int, db QueryList) |
// searchSequence iterates over content and populates result with genes and their matching alleles
func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) {
// populates result with a map from gene name to list of found alleles
for position := 0; position <= len(content)-db.SeedSize; position += 1 {
kmer := content[position : position+db.SeedSize] // kmer at current position in content
kmerHash := stringToHash(kmer)
query, ok := db.Index[kmerHash]
if ok {
// logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose)
for _, candidate := range query {
// check for a match
if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content {
// it's a match, split the sequence name into gene and allele
geneAllele := strings.Split(db.Names[candidate.Name], "_")
alleles, ok := result[GeneName(geneAllele[0])]
if !ok {
alleles = make(AlleleResult, 0)
}
alleles[geneAllele[1]] = true
if reverseComplement {
logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose)
} else {
logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose)
}
result[GeneName(geneAllele[0])] = alleles
} else {
// logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose)
}
}
} else {
// logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose)
}
}
}
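// Illustrative sketch (not part of the original source): sequence names are
// expected to follow the "<gene>_<allele>" convention that searchSequence splits
// on, e.g. a hypothetical "abcZ_12" yields gene "abcZ" and allele "12".
func exampleSplitName(name string) (GeneName, string) {
parts := strings.Split(name, "_")
return GeneName(parts[0]), parts[1]
}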
// IndexSequences generates an index from a gzipped list of alleles for genotyping
func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose)
var queryList QueryList
queryList.SeedSize = seedSize
queryList.Index = make(QueryIndex)
queryList.Cgst = CreateCGST(cgstFilename, verbose)
var content *bytes.Buffer
lines := 0
sequenceCount := 0
for _, sequenceFilename := range sequences {
logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose)
// open sequences (either .fa or .fa.gz) file for reading
file, err := os.Open(sequenceFilename)
checkResult(err)
var scanner *bufio.Scanner
if strings.HasSuffix(sequenceFilename, ".gz") {
gr, err := gzip.NewReader(file)
checkResult(err)
scanner = bufio.NewScanner(gr)
} else {
scanner = bufio.NewScanner(file)
}
// index sequences
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, ">") {
if content != nil {
addSearchableSequence(content.String(), sequenceCount-1, queryList)
}
queryList.Names = append(queryList.Names, line[1:])
sequenceCount++
content = new(bytes.Buffer)
} else {
(*content).WriteString(line)
}
lines++
if lines%1000000 == 0 {
logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false)
}
}
addSearchableSequence(content.String(), sequenceCount-1, queryList)
logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose)
file.Close()
}
logm("INFO", fmt.Sprintf("processing %v file(s): done. %v sequences", len(sequences), len(queryList.Names)), verbose)
return queryList
}
// SaveIndex writes an indexed collection of indexes for use with the genotype command
func SaveIndex(target string, source QueryList, verbose bool) {
logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose)
file, err := os.Create(target)
checkResult(err)
defer file.Close()
gr := gzip.NewWriter(file)
defer gr.Close()
encoder := gob.NewEncoder(gr)
err = encoder.Encode(source.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose)
err = encoder.Encode(source.SeedSize)
checkResult(err)
err = encoder.Encode(source.Cgst)
checkResult(err)
// save the index, but go has a size limit
indexSize := len(source.Index)
err = encoder.Encode(indexSize)
checkResult(err)
logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose)
count := 0
for key, value := range source.Index {
err = encoder.Encode(key)
checkResult(err)
err = encoder.Encode(value)
checkResult(err)
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: saved %v items", count), false)
}
}
logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose)
}
func LoadIndex(source string, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose)
var result QueryList
// open fa.gz file
file, err := os.Open(source)
checkResult(err)
defer file.Close()
gr, err := gzip.NewReader(file)
checkResult(err)
defer gr.Close()
decoder := gob.NewDecoder(gr)
err = decoder.Decode(&result.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose)
err = decoder.Decode(&result.SeedSize)
checkResult(err)
err = decoder.Decode(&result.Cgst)
checkResult(err)
var indexSize int
err = decoder.Decode(&indexSize)
checkResult(err)
result.Index = make(QueryIndex)
count := 0
for i := 0; i < indexSize; i++ {
var key uint32
var val []QueryPos
err = decoder.Decode(&key)
checkResult(err)
err = decoder.Decode(&val)
checkResult(err)
result.Index[key] = val
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false)
}
// logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose)
}
logm("INFO", fmt.Sprintf | {
// for an exact match we only have to hash the start of the query
if len(content) < db.SeedSize {
logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false)
return
}
position := 0
kmer := content[position : position+db.SeedSize]
kmerHash := stringToHash(kmer)
entry := QueryPos{Name: sequence, Pos: position, Content: content}
query, ok := db.Index[kmerHash]
if !ok {
query = make([]QueryPos, 0)
}
query = append(query, entry)
db.Index[kmerHash] = query
} | identifier_body |
genotype.go | }
func | (level string, msg string, verbose bool) {
if verbose || level != "DEBUG" {
fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg)
}
}
func checkResult(e error) {
if e != nil {
log.Fatal(e)
}
}
// joinAlleles takes a list of alleles and returns a comma separated list of them
func joinAlleles(alleles AlleleResult) string {
keys := make([]string, 0, len(alleles))
for k := range alleles {
keys = append(keys, k)
}
return strings.Join(keys, ",")
}
// reverse complements a single nucleotide
func reverse(in byte) byte {
result, ok := REVERSE_MAP[in]
if !ok {
log.Fatal("failed to reverse complement")
}
return result
}
var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
// reverseComplement reverses and complements a string
func reverseComplement(in *bytes.Buffer) []byte {
var result []byte = make([]byte, in.Len(), in.Len())
for pos := in.Len() - 1; pos >= 0; pos-- {
current, err := in.ReadByte()
checkResult(err)
result[pos] = reverse(current)
}
return result
}
// addSearchableSequence adds an allele sequence to the hash
func addSearchableSequence(content string, sequence int, db QueryList) {
// for an exact match we only have to hash the start of the query
if len(content) < db.SeedSize {
logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false)
return
}
position := 0
kmer := content[position : position+db.SeedSize]
kmerHash := stringToHash(kmer)
entry := QueryPos{Name: sequence, Pos: position, Content: content}
query, ok := db.Index[kmerHash]
if !ok {
query = make([]QueryPos, 0)
}
query = append(query, entry)
db.Index[kmerHash] = query
}
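// Illustrative sketch (not part of the original source): only the first SeedSize
// bases are hashed, so two hypothetical alleles sharing that prefix land in the
// same index bucket and are only told apart by the full-sequence comparison.
func exampleSharedSeed(db QueryList) int {
addSearchableSequence("ACGTACGTACGTACGTAAAA", 0, db)
addSearchableSequence("ACGTACGTACGTACGTCCCC", 1, db)
kmerHash := stringToHash("ACGTACGTACGTACGTAAAA"[:db.SeedSize])
return len(db.Index[kmerHash]) // 2 whenever SeedSize <= 16
}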
// searchSequence iterates over content and populates result with genes and their matching alleles
func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) {
// populates result with a map from gene name to list of found alleles
for position := 0; position <= len(content)-db.SeedSize; position += 1 {
kmer := content[position : position+db.SeedSize] // kmer at current position in content
kmerHash := stringToHash(kmer)
query, ok := db.Index[kmerHash]
if ok {
// logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose)
for _, candidate := range query {
// check for a match
if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content {
// it's a match, split the sequence name into gene and allele
geneAllele := strings.Split(db.Names[candidate.Name], "_")
alleles, ok := result[GeneName(geneAllele[0])]
if !ok {
alleles = make(AlleleResult, 0)
}
alleles[geneAllele[1]] = true
if reverseComplement {
logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose)
} else {
logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose)
}
result[GeneName(geneAllele[0])] = alleles
} else {
// logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose)
}
}
} else {
// logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose)
}
}
}
// IndexSequences generates an index from a gzipped list of alleles for genotyping
func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose)
var queryList QueryList
queryList.SeedSize = seedSize
queryList.Index = make(QueryIndex)
queryList.Cgst = CreateCGST(cgstFilename, verbose)
var content *bytes.Buffer
lines := 0
sequenceCount := 0
for _, sequenceFilename := range sequences {
logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose)
// open sequences (either .fa or .fa.gz) file for reading
file, err := os.Open(sequenceFilename)
checkResult(err)
var scanner *bufio.Scanner
if strings.HasSuffix(sequenceFilename, ".gz") {
gr, err := gzip.NewReader(file)
checkResult(err)
scanner = bufio.NewScanner(gr)
} else {
scanner = bufio.NewScanner(file)
}
// index sequences
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, ">") {
if content != nil {
addSearchableSequence(content.String(), sequenceCount-1, queryList)
}
queryList.Names = append(queryList.Names, line[1:])
sequenceCount++
content = new(bytes.Buffer)
} else {
(*content).WriteString(line)
}
lines++
if lines%1000000 == 0 {
logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false)
}
}
addSearchableSequence(content.String(), sequenceCount-1, queryList)
logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose)
file.Close()
}
logm("INFO", fmt.Sprintf("processing %v file(s): done. %v sequences", len(sequences), len(queryList.Names)), verbose)
return queryList
}
// SaveIndex writes an indexed collection of indexes for use with the genotype command
func SaveIndex(target string, source QueryList, verbose bool) {
logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose)
file, err := os.Create(target)
checkResult(err)
defer file.Close()
gr := gzip.NewWriter(file)
defer gr.Close()
encoder := gob.NewEncoder(gr)
err = encoder.Encode(source.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose)
err = encoder.Encode(source.SeedSize)
checkResult(err)
err = encoder.Encode(source.Cgst)
checkResult(err)
// save the index, but go has a size limit
indexSize := len(source.Index)
err = encoder.Encode(indexSize)
checkResult(err)
logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose)
count := 0
for key, value := range source.Index {
err = encoder.Encode(key)
checkResult(err)
err = encoder.Encode(value)
checkResult(err)
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: saved %v items", count), false)
}
}
logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose)
}
func LoadIndex(source string, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose)
var result QueryList
// open fa.gz file
file, err := os.Open(source)
checkResult(err)
defer file.Close()
gr, err := gzip.NewReader(file)
checkResult(err)
defer gr.Close()
decoder := gob.NewDecoder(gr)
err = decoder.Decode(&result.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose)
err = decoder.Decode(&result.SeedSize)
checkResult(err)
err = decoder.Decode(&result.Cgst)
checkResult(err)
var indexSize int
err = decoder.Decode(&indexSize)
checkResult(err)
result.Index = make(QueryIndex)
count := 0
for i := 0; i < indexSize; i++ {
var key uint32
var val []QueryPos
err = decoder.Decode(&key)
checkResult(err)
err = decoder.Decode(&val)
checkResult(err)
result.Index[key] = val
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false)
}
// logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose)
}
logm("INFO", fmt.Sprintf(" | logm | identifier_name |
genotype.go | ()
}
func logm(level string, msg string, verbose bool) {
if verbose || level != "DEBUG" {
fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().String(), level, msg)
}
}
func checkResult(e error) {
if e != nil {
log.Fatal(e)
}
}
// joinAlleles takes a list of alleles and returns a comma separated list of them
func joinAlleles(alleles AlleleResult) string {
keys := make([]string, 0, len(alleles))
for k := range alleles {
keys = append(keys, k)
}
return strings.Join(keys, ",")
}
// reverse complements a single nucleotide
func reverse(in byte) byte {
result, ok := REVERSE_MAP[in]
if !ok {
log.Fatal("failed to reverse complement")
}
return result
}
var REVERSE_MAP = map[byte]byte{'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
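// Example for reverseComplement (defined just below): the reverse complement of
// "GATTACA" is "TGTAATC".
// rc := reverseComplement(bytes.NewBufferString("GATTACA"))
// fmt.Println(string(rc)) // prints "TGTAATC"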
// reverseComplement reverses and complements a string
func reverseComplement(in *bytes.Buffer) []byte {
var result []byte = make([]byte, in.Len(), in.Len())
for pos := in.Len() - 1; pos >= 0; pos-- {
current, ok := in.ReadByte()
checkResult(ok)
result[pos] = reverse(current)
}
return result | }
// addSearchableSequence adds an allele sequence to the hash
func addSearchableSequence(content string, sequence int, db QueryList) {
// for an exact match we only have to hash the start of the query
if len(content) < db.SeedSize {
logm("WARN", fmt.Sprintf("sequence %v is length %v, shorter than seed size %v", sequence, len(content), db.SeedSize), false)
return
}
position := 0
kmer := content[position : position+db.SeedSize]
kmerHash := stringToHash(kmer)
entry := QueryPos{Name: sequence, Pos: position, Content: content}
query, ok := db.Index[kmerHash]
if !ok {
query = make([]QueryPos, 0)
}
query = append(query, entry)
db.Index[kmerHash] = query
}
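// stringToHash is used above but not shown in this excerpt; an implementation consistent
// with the uint32 keys of QueryIndex could be an FNV-1a hash (an assumption, not
// necessarily what the original file uses):
// import "hash/fnv"
// func stringToHash(s string) uint32 {
//     h := fnv.New32a()
//     h.Write([]byte(s))
//     return h.Sum32()
// }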
// searchSequence iterates over content and populates result with genes and their matching alleles
func searchSequence(content string, db QueryList, result GenomeAlleleResult, verbose bool, reverseComplement bool) {
// populates result with a map from gene name to list of found alleles
for position := 0; position <= len(content)-db.SeedSize; position += 1 {
kmer := content[position : position+db.SeedSize] // kmer at current position in content
kmerHash := stringToHash(kmer)
query, ok := db.Index[kmerHash]
if ok {
// logm("DEBUG", fmt.Sprintf("found %v potential locations for %s", len(query), content), verbose)
for _, candidate := range query {
// check for a match
if position-candidate.Pos >= 0 && position-candidate.Pos+len(candidate.Content) <= len(content) && content[position-candidate.Pos:position-candidate.Pos+len(candidate.Content)] == candidate.Content {
// it's a match, split the sequence name into gene and allele
geneAllele := strings.Split(db.Names[candidate.Name], "_")
alleles, ok := result[GeneName(geneAllele[0])]
if !ok {
alleles = make(AlleleResult, 0)
}
alleles[geneAllele[1]] = true
if reverseComplement {
logm("DEBUG", fmt.Sprintf("%s found at reverse complement -%v (%v)", db.Names[candidate.Name], position-candidate.Pos, len(content)-len(candidate.Content)-position+candidate.Pos), verbose)
} else {
logm("DEBUG", fmt.Sprintf("%s found at %v", db.Names[candidate.Name], position-candidate.Pos), verbose)
}
result[GeneName(geneAllele[0])] = alleles
} else {
// logm("DEBUG", fmt.Sprintf("didn't match %s", candidate.Content), verbose)
}
}
} else {
// logm("DEBUG", fmt.Sprintf("didn't find hash for %s", content), verbose)
}
}
}
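// Usage sketch (the command-line driver is not part of this excerpt; `genome` is a
// hypothetical input string, db comes from IndexSequences or LoadIndex):
// result := make(GenomeAlleleResult)
// searchSequence(genome, db, result, false, false) // forward strand
// rc := string(reverseComplement(bytes.NewBufferString(genome)))
// searchSequence(rc, db, result, false, true) // reverse complement strand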
// IndexSequences generates an index from allele FASTA files (.fa or .fa.gz) for genotyping
func IndexSequences(sequences []string, cgstFilename string, seedSize int, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("processing with cgst file '%s', seed size %v: %v sequence file(s)", cgstFilename, seedSize, len(sequences)), verbose)
var queryList QueryList
queryList.SeedSize = seedSize
queryList.Index = make(QueryIndex)
queryList.Cgst = CreateCGST(cgstFilename, verbose)
var content *bytes.Buffer
lines := 0
sequenceCount := 0
for _, sequenceFilename := range sequences {
logm("INFO", fmt.Sprintf("processing '%s'...", sequenceFilename), verbose)
// open sequences (either .fa or .fa.gz) file for reading
file, err := os.Open(sequenceFilename)
checkResult(err)
var scanner *bufio.Scanner
if strings.HasSuffix(sequenceFilename, ".gz") {
gr, err := gzip.NewReader(file)
checkResult(err)
scanner = bufio.NewScanner(gr)
} else {
scanner = bufio.NewScanner(file)
}
// index sequences
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, ">") {
if content != nil {
addSearchableSequence(content.String(), sequenceCount-1, queryList)
}
queryList.Names = append(queryList.Names, line[1:])
sequenceCount++
content = new(bytes.Buffer)
} else {
(*content).WriteString(line)
}
lines++
if lines%1000000 == 0 {
logm("INFO", fmt.Sprintf("processing %s: %v lines %v sequences. %v kmers", sequenceFilename, lines, len(queryList.Names), len(queryList.Index)), false)
}
}
addSearchableSequence(content.String(), sequenceCount-1, queryList)
logm("INFO", fmt.Sprintf("processing '%s': done", sequenceFilename), verbose)
file.Close()
}
logm("INFO", fmt.Sprintf("processing %v file(s): done. %v sequences", len(sequences), len(queryList.Names)), verbose)
return queryList
}
// SaveIndex writes the indexed query collection to disk for use with the genotype command
func SaveIndex(target string, source QueryList, verbose bool) {
logm("INFO", fmt.Sprintf("saving index to %s...", target), verbose)
file, err := os.Create(target)
checkResult(err)
defer file.Close()
gr := gzip.NewWriter(file)
defer gr.Close()
encoder := gob.NewEncoder(gr)
err = encoder.Encode(source.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names saved", len(source.Names)), verbose)
err = encoder.Encode(source.SeedSize)
checkResult(err)
err = encoder.Encode(source.Cgst)
checkResult(err)
// save the index entry by entry, since a single gob message has a size limit
indexSize := len(source.Index)
err = encoder.Encode(indexSize)
checkResult(err)
logm("INFO", fmt.Sprintf("%v queries to save...", indexSize), verbose)
count := 0
for key, value := range source.Index {
err = encoder.Encode(key)
checkResult(err)
err = encoder.Encode(value)
checkResult(err)
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: saved %v items", count), false)
}
}
logm("INFO", fmt.Sprintf("saving index to %s: done", target), verbose)
}
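// The entries are encoded one by one because a single encoder.Encode of a very large map
// can exceed gob's message size limit; LoadIndex below has to mirror exactly this order.
// Pattern sketch:
// encoder.Encode(len(m))                   // count first
// for k, v := range m {
//     encoder.Encode(k)                    // then each key/value pair
//     encoder.Encode(v)
// }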
func LoadIndex(source string, verbose bool) QueryList {
logm("INFO", fmt.Sprintf("loading index from %s...", source), verbose)
var result QueryList
// open the gzipped index file
file, err := os.Open(source)
checkResult(err)
defer file.Close()
gr, err := gzip.NewReader(file)
checkResult(err)
defer gr.Close()
decoder := gob.NewDecoder(gr)
err = decoder.Decode(&result.Names)
checkResult(err)
logm("INFO", fmt.Sprintf("%v sequence names restored", len(result.Names)), verbose)
err = decoder.Decode(&result.SeedSize)
checkResult(err)
err = decoder.Decode(&result.Cgst)
checkResult(err)
var indexSize int
err = decoder.Decode(&indexSize)
checkResult(err)
result.Index = make(QueryIndex)
count := 0
for i := 0; i < indexSize; i++ {
var key uint32
var val []QueryPos
err = decoder.Decode(&key)
checkResult(err)
err = decoder.Decode(&val)
checkResult(err)
result.Index[key] = val
count++
if count%10000 == 0 {
logm("INFO", fmt.Sprintf("processing: loaded %v items", count), false)
}
// logm("DEBUG", fmt.Sprintf("last key: %v, values: %v", key, len(val)), verbose)
}
logm("INFO", fmt.Sprintf("loading | random_line_split |
|
lib.rs | type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed since the last call.
/// commands: ordered commands, passed on by the server.
/// from: address of the command sender.
/// Returns a bool indicating whether the server should continue running;
/// if false, the server stops.
/// Called only when new commands arrive at the server.
/// Commands are ordered and delivered with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets the new state to send to clients.
/// delta_time: time elapsed since the last call.
/// Returns bytes with the new game state for the client.
/// Called roughly once every 30 milliseconds.
/// The state is sent only to clients connected to the server.
///Ordered, but without some delivery guarantees.
/// If this returns an empty Vec<u8>, the server skips sending it and goes to the next iteration.
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow the client with this IP address to work with the server.
/// If false, the server does not send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add so that it receives state from the server.
/// If this returns Some, the server sends new state to this client on draw.
/// If a client with this IP address is already connected, nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
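// A minimal sketch of a type implementing Game (hypothetical, not part of this crate):
// struct EchoGame { last: Vec<u8> }
// impl Game for EchoGame {
//     fn handle_command(&mut self, _dt: Duration, commands: Vec<Vec<u8>>, _from: SocketAddr) -> ContinueRunning {
//         if let Some(c) = commands.into_iter().last() { self.last = c; }
//         true
//     }
//     fn draw(&mut self, _dt: Duration) -> Vec<u8> { self.last.clone() }
// }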
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create a new client that listens on port to receive packets from server_address and send packets to it.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to the server.
/// Does not block the current thread,
/// but may wait up to 30 milliseconds if you send commands too often.
///Commands are ordered and delivered with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Does not block the current thread.
/// Returns [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data is ordered, but without some delivery guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
}
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn | (&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if !self.servers.contains_key(client) {
self.servers.insert(client.clone(), bll::Server::new());
}
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers {
let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
}
exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Creates a new server listening on the given port.
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs the game update/draw loop.
/// Blocks the current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const SEND | remove | identifier_name |
lib.rs | type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed since the last call.
/// commands: ordered commands, passed on by the server.
/// from: address of the command sender.
/// Returns a bool indicating whether the server should continue running;
/// if false, the server stops.
/// Called only when new commands arrive at the server.
/// Commands are ordered and delivered with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets the new state to send to clients.
/// delta_time: time elapsed since the last call.
/// Returns bytes with the new game state for the client.
/// Called roughly once every 30 milliseconds.
/// The state is sent only to clients connected to the server.
///Ordered, but without some delivery guarantees.
/// If this returns an empty Vec<u8>, the server skips sending it and goes to the next iteration.
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow the client with this IP address to work with the server.
/// If false, the server does not send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add so that it receives state from the server.
/// If this returns Some, the server sends new state to this client on draw.
/// If a client with this IP address is already connected, nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create a new client that listens on port to receive packets from server_address and send packets to it.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to the server.
/// Does not block the current thread,
/// but may wait up to 30 milliseconds if you send commands too often.
///Commands are ordered and delivered with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Does not block the current thread.
/// Returns [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data is ordered, but without some delivery guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> |
}
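// Usage sketch (port and address are hypothetical; error handling elided):
// let mut client = ClientSocket::new(12345, "127.0.0.1:23456")?;
// client.send(b"turn left".to_vec())?;
// match client.recv() {
//     Ok(state) => println!("state: {} bytes", state.len()),
//     Err(_e) => {} // WouldBlock simply means no data is available yet
// }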
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn remove(&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if !self.servers.contains_key(client) {
self.servers.insert(client.clone(), bll::Server::new());
}
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers {
let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
}
exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Creates a new server listening on the given port.
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs the game update/draw loop.
/// Blocks the current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const | {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
} | identifier_body |
lib.rs | type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed since the last call.
/// commands: ordered commands, passed on by the server.
/// from: address of the command sender.
/// Returns a bool indicating whether the server should continue running;
/// if false, the server stops.
/// Called only when new commands arrive at the server.
/// Commands are ordered and delivered with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets the new state to send to clients.
/// delta_time: time elapsed since the last call.
/// Returns bytes with the new game state for the client.
/// Called roughly once every 30 milliseconds.
/// The state is sent only to clients connected to the server.
///Ordered, but without some delivery guarantees.
/// If this returns an empty Vec<u8>, the server skips sending it and goes to the next iteration.
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow the client with this IP address to work with the server.
/// If false, the server does not send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add so that it receives state from the server.
/// If this returns Some, the server sends new state to this client on draw.
/// If a client with this IP address is already connected, nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create a new client that listens on port to receive packets from server_address and send packets to it.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to the server.
/// Does not block the current thread,
/// but may wait up to 30 milliseconds if you send commands too often.
///Commands are ordered and delivered with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Does not block the current thread.
/// Returns [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data is ordered, but without some delivery guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
}
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn remove(&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if !self.servers.contains_key(client) |
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers {
let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
}
exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Creates a new server listening on the given port.
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs the game update/draw loop.
/// Blocks the current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
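// Usage sketch (MyGame is a hypothetical Game implementation):
// let game = MyGame::new();
// let mut server = GameServer::new(game, 23456).expect("could not bind port");
// server.run(); // blocks until the game stops the server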
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const | {
self.servers.insert(client.clone(), bll::Server::new());
} | conditional_block |
lib.rs | type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed since the last call.
/// commands: ordered commands, passed on by the server.
/// from: address of the command sender.
/// Returns a bool indicating whether the server should continue running;
/// if false, the server stops.
/// Called only when new commands arrive at the server.
/// Commands are ordered and delivered with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets the new state to send to clients.
/// delta_time: time elapsed since the last call.
/// Returns bytes with the new game state for the client.
/// Called roughly once every 30 milliseconds.
/// The state is sent only to clients connected to the server.
///Ordered, but without some delivery guarantees.
/// If this returns an empty Vec<u8>, the server skips sending it and goes to the next iteration.
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow the client with this IP address to work with the server.
/// If false, the server does not send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add so that it receives state from the server.
/// If this returns Some, the server sends new state to this client on draw.
/// If a client with this IP address is already connected, nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create a new client that listens on port to receive packets from server_address and send packets to it.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to the server.
/// Does not block the current thread,
/// but may wait up to 30 milliseconds if you send commands too often.
///Commands are ordered and delivered with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Does not block the current thread.
/// Returns [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data is ordered, but without some delivery guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
}
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn remove(&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if !self.servers.contains_key(client) {
self.servers.insert(client.clone(), bll::Server::new());
}
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers { | exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Creates a new server listening on the given port.
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs the game update/draw loop.
/// Blocks the current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const SEND_TIMEOUT | let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
} | random_line_split |
dicer.py | 2, cv2.LINE_AA)
pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)
cv2.imshow('Press any key to exit', grey)
print('Error - stopping')
cv2.waitKey() # press any key to exit
elif GPIO.input(18) == 0 and gpios == True: # check the temperature relay if an RPi is present
print('Temperature relay is offline, stopping')
else:
dicer_ready = True
global_steptime = 0.00015 # delay between steps
# configure the blob detector
blob_params = cv2.SimpleBlobDetector_Params()
blob_params.filterByColor = True
blob_params.filterByArea = True
blob_params.minArea = 100
blob_params.filterByCircularity = True
blob_params.minCircularity = 0.7
blob_params.filterByInertia = False
blob_params.filterByConvexity = False
all_numbers = [0] * 9 # [one, two, three, four, five, six, errorcnt, rollnumber, std_dev
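# The detector built from blob_params is not created in this excerpt; presumably it is
# used elsewhere roughly like this (sketch, 'processed' = output of img_processing()):
# detector = cv2.SimpleBlobDetector_create(blob_params)
# keypoints = detector.detect(processed)
# pip_count = len(keypoints)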
def interr(channel):
global gpios
global dicer_ready
global interrupted
gpios = False
dicer_ready = False
interrupted = True
print('Interrupt')
def step_plus(steptime):
GPIO.output(17, GPIO.LOW)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
def step_minus(steptime):
GPIO.output(17, GPIO.HIGH)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
GPIO.output(17, GPIO.LOW)
def clock(now):
time_seconds = int((time.time() - now))
t_hr = int(time_seconds / 3600)
t_min = int(time_seconds / 60) - (t_hr * 60)
t_sec = int(time_seconds) - (t_min * 60)
showTime = str(t_hr) + ':' + str(t_min).zfill(2)
print(showTime)
return showTime
def write_email(numbers, ctime, error, header_name):
server = smtplib.SMTP('SERVERADRESSE', PORTNR)
server.starttls()
server.login('LOGIN-BENUTZERNAME', 'PASSWORT')
msg = MIMEMultipart()
msg['From'] = 'ABSENDER'
msg['To'] = 'EMPFAENGER'
if error:
msg['Subject'] = 'Error'
else:
msg['Cc'] = 'KOPIE ADRESSE'
msg['Subject'] = header_name
message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]) + ',' + str(numbers[3]) + ',' + str(
numbers[4]) + ',' + str(numbers[5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(
numbers[7]) + '\n' + 'Zeit: '+ str(ctime)
msg.attach(MIMEText(message))
server.send_message(msg)
def logging(numbers, ctime, log_name):
file = open(log_name, 'w')
file.write('Einz:' + str(numbers[0]) + '\n')
file.write('Zwei:' + str(numbers[1]) + '\n')
file.write("Drei: " + str(numbers[2]) + '\n')
file.write("Vier: " + str(numbers[3]) + '\n')
file.write("Fuenf: " + str(numbers[4]) + '\n')
file.write("Sechs: " + str(numbers[5]) + '\n')
file.write('Fehler: ' + str(numbers[6]) + '\n')
file.write('Gesamt: ' + str(numbers[7]) + '\n')
file.write('Standardabw: ' + str(numbers[8]) + '\n')
file.write('Zeit: ' + str(ctime) + '\n')
file.close()
def get_images():
for i in range(5):
ret, fram | 2.imwrite('frame.png',frame)
# image crops for the die and for position detection
y = 160
h = 240
x = 220
w = 240
dice_image = frame[y:y + h, x:x + w]
grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('input', grey)
#cv2.imwrite('real_image.png',frame)
y = 120
h = 15
pos_img = frame[y:y + h, x:x + w]
pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)
#cv2.imwrite('pos_raw.png',pos_img)
ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)
#cv2.imshow('pos', pos_img)
#cv2.imwrite('pos.png',pos_img)
return grey, pos_img
def hough_detector(input_img):
#cv2.imshow('hough_input', input_image)
img = cv2.medianBlur(input_img, 5) # smooth the image (median filter)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # convert color space (only needed to draw the colored circles)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200, param2=10, minRadius=5,
maxRadius=25) # param1: threshold, param2: needs to be tuned empirically
h_number = 0
try: # count and mark the circles
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
h_number += 1
except:
print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')
cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 50), 2, cv2.LINE_AA)
cv2.imshow('hough detector', cimg)
cv2.imwrite('hough detector.png', cimg)
return h_number
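# Usage sketch with the helpers defined in this file:
# grey, pos_img = get_images()
# pips = hough_detector(grey)
# print('detected', pips, 'pips')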
def img_processing(image_input): # image preparation
image_input = cv2.medianBlur(image_input, 3) # smooth the image (median filter)
ret, binary_image = cv2.threshold(image_input, 220, 255,
cv2.THRESH_BINARY) # threshold image
#cv2.imwrite('binary1.png', binary_image)
if darknumbers: # if the pips are dark, flood-fill the area around the die to white
w = binary_image.shape[1] #y
h = binary_image.shape[0] #x
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(binary_image, mask, (0,0), 255);
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(binary_image, mask, (h,w), 255);
else:
binary_image = cv2.bitwise_not(binary_image) # for light pips, simply inverting the image is enough
#cv2.imwrite('binary2.png', binary_image)
kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, | e = cap.read()
#cv | conditional_block |