Column      Type           Values
file_name   large_string   lengths 4 to 140
prefix      large_string   lengths 0 to 39k
suffix      large_string   lengths 0 to 36.1k
middle      large_string   lengths 0 to 29.4k
fim_type    large_string   4 distinct classes
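Each row of this fill-in-the-middle (FIM) dataset stores a source file name, the code before the masked span (prefix), the code after it (suffix), the masked span itself (middle), and a fim_type label describing how the span was chosen. A minimal Python sketch of loading the rows and reassembling one example follows; the parquet path is hypothetical and the sentinel tokens in the prompt are purely illustrative, not a format defined by this dataset.

import pandas as pd

# Hypothetical path; the five column names match the schema listed above.
df = pd.read_parquet("data/train.parquet")
row = df.iloc[0]

# The original file content is exactly prefix + middle + suffix.
reconstructed = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"], len(reconstructed))

# A FIM-style prompt shows the model the prefix and suffix and asks for the middle.
fim_prompt = "<PRE>" + row["prefix"] + "<SUF>" + row["suffix"] + "<MID>"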
indexable.py
#=============================================================================== # Copyright (c) 2015, Max Zwiessele # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of paramz.core.indexable nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #=============================================================================== import numpy as np from .nameable import Nameable from .updateable import Updateable from ..transformations import __fixed__ from operator import delitem from functools import reduce from collections import OrderedDict class Indexable(Nameable, Updateable): """ Make an object constrainable with Priors and Transformations. TODO: Mappings!! (As in ties etc.) Adding a constraint to a Parameter means to tell the highest parent that the constraint was added and making sure that all parameters covered by this object are indeed conforming to the constraint. :func:`constrain()` and :func:`unconstrain()` are main methods here """ def __init__(self, name, default_constraint=None, *a, **kw): super(Indexable, self).__init__(name=name, *a, **kw) self._index_operations = OrderedDict() def __setstate__(self, state): super(Indexable, self).__setstate__(state) for name in self._index_operations: self._add_io(name, self._index_operations[name]) #@property #def _index_operations(self): # try: # return self._index_operations_dict # except AttributeError: # self._index_operations_dict = OrderedDict() # return self._index_operations_dict #@_index_operations.setter #def _index_operations(self, io): # self._index_operations_dict = io def add_index_operation(self, name, operations): """ Add index operation with name to the operations given. raises: attribute error if operations exist. """ if name not in self._index_operations:
else: raise AttributeError("An index operation with the name {} was already taken".format(name)) def _add_io(self, name, operations): self._index_operations[name] = operations def do_raise(self, x): self._index_operations.__setitem__(name, x) self._connect_fixes() self._notify_parent_change() #raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name)) setattr(Indexable, name, property(fget=lambda self: self._index_operations[name], fset=do_raise)) def remove_index_operation(self, name): if name in self._index_operations: delitem(self._index_operations, name) #delattr(self, name) else: raise AttributeError("No index operation with the name {}".format(name)) def _disconnect_parent(self, *args, **kw): """ From Parentable: disconnect the parent and set the new constraints to constr """ for name, iop in list(self._index_operations.items()): iopc = iop.copy() iop.clear() self.remove_index_operation(name) self.add_index_operation(name, iopc) #self.constraints.clear() #self.constraints = constr self._parent_ = None self._parent_index_ = None self._connect_fixes() self._notify_parent_change() #=========================================================================== # Indexable #=========================================================================== def _offset_for(self, param): """ Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param. """ if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0 ### Global index operations (from highest_parent) ### These indices are for gradchecking, so that we ### can index the optimizer array and manipulate it directly ### The indices here do not reflect the indices in ### index_operations, as index operations handle ### the offset themselves and can be set directly ### without doing the offset. def _raveled_index_for(self, param): """ get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work """ from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param) def _raveled_index_for_transformed(self, param): """ get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns! 
""" ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi ### These indices are just the raveled index for self ### These are in the index_operations are used for them ### The index_operations then handle the offsets themselves ### This makes it easier to test and handle indices ### as the index operations framework is in its own ### corner and can be set significantly better without ### being inside the parameterized scope. def _raveled_index(self): """ Flattened array of ints, specifying the index of this object. This has to account for shaped parameters! """ return np.r_[:self.size] ###### #=========================================================================== # Tie parameters together # TODO: create own class for tieing and remapping #=========================================================================== # def _has_ties(self): # if self._highest_parent_.tie.tied_param is None: # return False # if self.has_parent(): # return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0 # return True # # def tie_together(self): # self._highest_parent_.tie.add_tied_parameter(self) # self._highest_parent_._set_fixed(self,self._raveled_index()) # self._trigger_params_changed() #=============================================================================== def _parent_changed(self, parent): """ From Parentable: Called when the parent changed update the constraints and priors view, so that constraining is automized for the parent. """ from .index_operations import ParameterIndexOperationsView #if getattr(self, "_in_init_"): #import ipdb;ipdb.set_trace() #self.constraints.update(param.constraints, start) #self.priors.update(param.priors, start) offset = parent._offset_for(self) for name, iop in list(self._index_operations.items()): self.remove_index_operation(name) self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size)) self._fixes_ = None for p in self.parameters: p._parent_changed(parent) def _add_to_index_operations(self, which, reconstrained, what, warning): """ Helper preventing copy code. This adds the given what (transformation, prior etc) to parameter index operations which. reconstrained are reconstrained indices. warn when reconstraining parameters if warning is True. TODO: find out which parameters have changed specifically """ if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index def _remove_from_index_operations(self, which, transforms): """ Helper preventing copy code. Remove given what (transform prior etc) from which param index ops. """ if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
self._add_io(name, operations)
conditional_block
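In the first row the fim_type conditional_block marks a middle that is the body of one branch of a conditional: the prefix ends on "if name not in self._index_operations:", the middle is the single call inside that branch, and the suffix resumes at the matching else. Reflowed from the flattened cells above, the surrounding method reads:

def add_index_operation(self, name, operations):
    """
    Add index operation with name to the operations given.

    raises: attribute error if operations exist.
    """
    if name not in self._index_operations:
        self._add_io(name, operations)  # the masked middle
    else:
        raise AttributeError("An index operation with the name {} was already taken".format(name))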
indexable.py
#=============================================================================== # Copyright (c) 2015, Max Zwiessele # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of paramz.core.indexable nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #=============================================================================== import numpy as np from .nameable import Nameable from .updateable import Updateable from ..transformations import __fixed__ from operator import delitem from functools import reduce from collections import OrderedDict class Indexable(Nameable, Updateable): """ Make an object constrainable with Priors and Transformations. TODO: Mappings!! (As in ties etc.) Adding a constraint to a Parameter means to tell the highest parent that the constraint was added and making sure that all parameters covered by this object are indeed conforming to the constraint. :func:`constrain()` and :func:`unconstrain()` are main methods here """ def __init__(self, name, default_constraint=None, *a, **kw): super(Indexable, self).__init__(name=name, *a, **kw) self._index_operations = OrderedDict() def __setstate__(self, state): super(Indexable, self).__setstate__(state) for name in self._index_operations: self._add_io(name, self._index_operations[name]) #@property #def _index_operations(self): # try: # return self._index_operations_dict # except AttributeError: # self._index_operations_dict = OrderedDict() # return self._index_operations_dict #@_index_operations.setter #def _index_operations(self, io): # self._index_operations_dict = io def add_index_operation(self, name, operations): """ Add index operation with name to the operations given. raises: attribute error if operations exist. 
""" if name not in self._index_operations: self._add_io(name, operations) else: raise AttributeError("An index operation with the name {} was already taken".format(name)) def _add_io(self, name, operations): self._index_operations[name] = operations def do_raise(self, x): self._index_operations.__setitem__(name, x) self._connect_fixes() self._notify_parent_change() #raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name)) setattr(Indexable, name, property(fget=lambda self: self._index_operations[name], fset=do_raise)) def remove_index_operation(self, name): if name in self._index_operations: delitem(self._index_operations, name) #delattr(self, name) else: raise AttributeError("No index operation with the name {}".format(name)) def _disconnect_parent(self, *args, **kw): """ From Parentable: disconnect the parent and set the new constraints to constr """ for name, iop in list(self._index_operations.items()): iopc = iop.copy() iop.clear() self.remove_index_operation(name) self.add_index_operation(name, iopc) #self.constraints.clear() #self.constraints = constr self._parent_ = None self._parent_index_ = None self._connect_fixes() self._notify_parent_change() #=========================================================================== # Indexable #===========================================================================
""" if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0 ### Global index operations (from highest_parent) ### These indices are for gradchecking, so that we ### can index the optimizer array and manipulate it directly ### The indices here do not reflect the indices in ### index_operations, as index operations handle ### the offset themselves and can be set directly ### without doing the offset. def _raveled_index_for(self, param): """ get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work """ from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param) def _raveled_index_for_transformed(self, param): """ get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns! """ ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi ### These indices are just the raveled index for self ### These are in the index_operations are used for them ### The index_operations then handle the offsets themselves ### This makes it easier to test and handle indices ### as the index operations framework is in its own ### corner and can be set significantly better without ### being inside the parameterized scope. def _raveled_index(self): """ Flattened array of ints, specifying the index of this object. This has to account for shaped parameters! """ return np.r_[:self.size] ###### #=========================================================================== # Tie parameters together # TODO: create own class for tieing and remapping #=========================================================================== # def _has_ties(self): # if self._highest_parent_.tie.tied_param is None: # return False # if self.has_parent(): # return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0 # return True # # def tie_together(self): # self._highest_parent_.tie.add_tied_parameter(self) # self._highest_parent_._set_fixed(self,self._raveled_index()) # self._trigger_params_changed() #=============================================================================== def _parent_changed(self, parent): """ From Parentable: Called when the parent changed update the constraints and priors view, so that constraining is automized for the parent. 
""" from .index_operations import ParameterIndexOperationsView #if getattr(self, "_in_init_"): #import ipdb;ipdb.set_trace() #self.constraints.update(param.constraints, start) #self.priors.update(param.priors, start) offset = parent._offset_for(self) for name, iop in list(self._index_operations.items()): self.remove_index_operation(name) self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size)) self._fixes_ = None for p in self.parameters: p._parent_changed(parent) def _add_to_index_operations(self, which, reconstrained, what, warning): """ Helper preventing copy code. This adds the given what (transformation, prior etc) to parameter index operations which. reconstrained are reconstrained indices. warn when reconstraining parameters if warning is True. TODO: find out which parameters have changed specifically """ if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index def _remove_from_index_operations(self, which, transforms): """ Helper preventing copy code. Remove given what (transform prior etc) from which param index ops. """ if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
def _offset_for(self, param): """ Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param.
random_line_split
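The second row is labeled random_line_split: the mask boundaries fall on line breaks rather than on a syntactic unit, so the middle opens at the "def _offset_for(self, param):" line and stops partway through its docstring, and the suffix begins with the docstring's closing quotes. Below is a minimal sketch of such a splitter; it illustrates what the label suggests and is not the code that actually produced this dataset.

import random

def random_line_split(source, rng=random):
    # Split file content into (prefix, middle, suffix) at line boundaries
    # chosen at random; the concatenation is lossless by construction.
    lines = source.splitlines(keepends=True)
    if len(lines) < 2:
        return source, "", ""
    start = rng.randrange(len(lines))           # first line of the middle
    end = rng.randrange(start, len(lines)) + 1  # one past the last middle line
    prefix = "".join(lines[:start])
    middle = "".join(lines[start:end])
    suffix = "".join(lines[end:])
    assert prefix + middle + suffix == source
    return prefix, middle, suffix

# Example usage (assuming the file is available locally):
# prefix, middle, suffix = random_line_split(open("indexable.py").read())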
indexable.py
#=============================================================================== # Copyright (c) 2015, Max Zwiessele # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of paramz.core.indexable nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #=============================================================================== import numpy as np from .nameable import Nameable from .updateable import Updateable from ..transformations import __fixed__ from operator import delitem from functools import reduce from collections import OrderedDict class Indexable(Nameable, Updateable): """ Make an object constrainable with Priors and Transformations. TODO: Mappings!! (As in ties etc.) Adding a constraint to a Parameter means to tell the highest parent that the constraint was added and making sure that all parameters covered by this object are indeed conforming to the constraint. :func:`constrain()` and :func:`unconstrain()` are main methods here """ def __init__(self, name, default_constraint=None, *a, **kw): super(Indexable, self).__init__(name=name, *a, **kw) self._index_operations = OrderedDict() def __setstate__(self, state): super(Indexable, self).__setstate__(state) for name in self._index_operations: self._add_io(name, self._index_operations[name]) #@property #def _index_operations(self): # try: # return self._index_operations_dict # except AttributeError: # self._index_operations_dict = OrderedDict() # return self._index_operations_dict #@_index_operations.setter #def _index_operations(self, io): # self._index_operations_dict = io def add_index_operation(self, name, operations): """ Add index operation with name to the operations given. raises: attribute error if operations exist. 
""" if name not in self._index_operations: self._add_io(name, operations) else: raise AttributeError("An index operation with the name {} was already taken".format(name)) def _add_io(self, name, operations): self._index_operations[name] = operations def do_raise(self, x): self._index_operations.__setitem__(name, x) self._connect_fixes() self._notify_parent_change() #raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name)) setattr(Indexable, name, property(fget=lambda self: self._index_operations[name], fset=do_raise)) def remove_index_operation(self, name): if name in self._index_operations: delitem(self._index_operations, name) #delattr(self, name) else: raise AttributeError("No index operation with the name {}".format(name)) def _disconnect_parent(self, *args, **kw): """ From Parentable: disconnect the parent and set the new constraints to constr """ for name, iop in list(self._index_operations.items()): iopc = iop.copy() iop.clear() self.remove_index_operation(name) self.add_index_operation(name, iopc) #self.constraints.clear() #self.constraints = constr self._parent_ = None self._parent_index_ = None self._connect_fixes() self._notify_parent_change() #=========================================================================== # Indexable #=========================================================================== def _offset_for(self, param):
### Global index operations (from highest_parent) ### These indices are for gradchecking, so that we ### can index the optimizer array and manipulate it directly ### The indices here do not reflect the indices in ### index_operations, as index operations handle ### the offset themselves and can be set directly ### without doing the offset. def _raveled_index_for(self, param): """ get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work """ from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param) def _raveled_index_for_transformed(self, param): """ get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns! """ ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi ### These indices are just the raveled index for self ### These are in the index_operations are used for them ### The index_operations then handle the offsets themselves ### This makes it easier to test and handle indices ### as the index operations framework is in its own ### corner and can be set significantly better without ### being inside the parameterized scope. def _raveled_index(self): """ Flattened array of ints, specifying the index of this object. This has to account for shaped parameters! """ return np.r_[:self.size] ###### #=========================================================================== # Tie parameters together # TODO: create own class for tieing and remapping #=========================================================================== # def _has_ties(self): # if self._highest_parent_.tie.tied_param is None: # return False # if self.has_parent(): # return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0 # return True # # def tie_together(self): # self._highest_parent_.tie.add_tied_parameter(self) # self._highest_parent_._set_fixed(self,self._raveled_index()) # self._trigger_params_changed() #=============================================================================== def _parent_changed(self, parent): """ From Parentable: Called when the parent changed update the constraints and priors view, so that constraining is automized for the parent. 
""" from .index_operations import ParameterIndexOperationsView #if getattr(self, "_in_init_"): #import ipdb;ipdb.set_trace() #self.constraints.update(param.constraints, start) #self.priors.update(param.priors, start) offset = parent._offset_for(self) for name, iop in list(self._index_operations.items()): self.remove_index_operation(name) self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size)) self._fixes_ = None for p in self.parameters: p._parent_changed(parent) def _add_to_index_operations(self, which, reconstrained, what, warning): """ Helper preventing copy code. This adds the given what (transformation, prior etc) to parameter index operations which. reconstrained are reconstrained indices. warn when reconstraining parameters if warning is True. TODO: find out which parameters have changed specifically """ if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index def _remove_from_index_operations(self, which, transforms): """ Helper preventing copy code. Remove given what (transform prior etc) from which param index ops. """ if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
""" Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param. """ if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0
identifier_body
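The third row's fim_type, identifier_body, masks the entire body of a definition: the prefix ends on the "def _offset_for(self, param):" signature and the middle carries the docstring and body shown flattened above (reduce comes from functools, imported at the top of indexable.py). Reflowed, the reconstructed method is:

def _offset_for(self, param):
    """
    Return the offset of the param inside this parameterized object.
    This does not need to account for shaped parameters, as it basically
    just sums up the parameter sizes which come before param.
    """
    if param.has_parent():
        p = param._parent_._get_original(param)
        if p in self.parameters:
            return reduce(lambda a, b: a + b.size, self.parameters[:p._parent_index_], 0)
        return self._offset_for(param._parent_) + param._parent_._offset_for(param)
    return 0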
indexable.py
#=============================================================================== # Copyright (c) 2015, Max Zwiessele # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of paramz.core.indexable nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #=============================================================================== import numpy as np from .nameable import Nameable from .updateable import Updateable from ..transformations import __fixed__ from operator import delitem from functools import reduce from collections import OrderedDict class Indexable(Nameable, Updateable): """ Make an object constrainable with Priors and Transformations. TODO: Mappings!! (As in ties etc.) Adding a constraint to a Parameter means to tell the highest parent that the constraint was added and making sure that all parameters covered by this object are indeed conforming to the constraint. :func:`constrain()` and :func:`unconstrain()` are main methods here """ def __init__(self, name, default_constraint=None, *a, **kw): super(Indexable, self).__init__(name=name, *a, **kw) self._index_operations = OrderedDict() def __setstate__(self, state): super(Indexable, self).__setstate__(state) for name in self._index_operations: self._add_io(name, self._index_operations[name]) #@property #def _index_operations(self): # try: # return self._index_operations_dict # except AttributeError: # self._index_operations_dict = OrderedDict() # return self._index_operations_dict #@_index_operations.setter #def _index_operations(self, io): # self._index_operations_dict = io def add_index_operation(self, name, operations): """ Add index operation with name to the operations given. raises: attribute error if operations exist. 
""" if name not in self._index_operations: self._add_io(name, operations) else: raise AttributeError("An index operation with the name {} was already taken".format(name)) def _add_io(self, name, operations): self._index_operations[name] = operations def do_raise(self, x): self._index_operations.__setitem__(name, x) self._connect_fixes() self._notify_parent_change() #raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name)) setattr(Indexable, name, property(fget=lambda self: self._index_operations[name], fset=do_raise)) def
(self, name): if name in self._index_operations: delitem(self._index_operations, name) #delattr(self, name) else: raise AttributeError("No index operation with the name {}".format(name)) def _disconnect_parent(self, *args, **kw): """ From Parentable: disconnect the parent and set the new constraints to constr """ for name, iop in list(self._index_operations.items()): iopc = iop.copy() iop.clear() self.remove_index_operation(name) self.add_index_operation(name, iopc) #self.constraints.clear() #self.constraints = constr self._parent_ = None self._parent_index_ = None self._connect_fixes() self._notify_parent_change() #=========================================================================== # Indexable #=========================================================================== def _offset_for(self, param): """ Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param. """ if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0 ### Global index operations (from highest_parent) ### These indices are for gradchecking, so that we ### can index the optimizer array and manipulate it directly ### The indices here do not reflect the indices in ### index_operations, as index operations handle ### the offset themselves and can be set directly ### without doing the offset. def _raveled_index_for(self, param): """ get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work """ from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param) def _raveled_index_for_transformed(self, param): """ get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns! """ ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi ### These indices are just the raveled index for self ### These are in the index_operations are used for them ### The index_operations then handle the offsets themselves ### This makes it easier to test and handle indices ### as the index operations framework is in its own ### corner and can be set significantly better without ### being inside the parameterized scope. def _raveled_index(self): """ Flattened array of ints, specifying the index of this object. This has to account for shaped parameters! 
""" return np.r_[:self.size] ###### #=========================================================================== # Tie parameters together # TODO: create own class for tieing and remapping #=========================================================================== # def _has_ties(self): # if self._highest_parent_.tie.tied_param is None: # return False # if self.has_parent(): # return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0 # return True # # def tie_together(self): # self._highest_parent_.tie.add_tied_parameter(self) # self._highest_parent_._set_fixed(self,self._raveled_index()) # self._trigger_params_changed() #=============================================================================== def _parent_changed(self, parent): """ From Parentable: Called when the parent changed update the constraints and priors view, so that constraining is automized for the parent. """ from .index_operations import ParameterIndexOperationsView #if getattr(self, "_in_init_"): #import ipdb;ipdb.set_trace() #self.constraints.update(param.constraints, start) #self.priors.update(param.priors, start) offset = parent._offset_for(self) for name, iop in list(self._index_operations.items()): self.remove_index_operation(name) self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size)) self._fixes_ = None for p in self.parameters: p._parent_changed(parent) def _add_to_index_operations(self, which, reconstrained, what, warning): """ Helper preventing copy code. This adds the given what (transformation, prior etc) to parameter index operations which. reconstrained are reconstrained indices. warn when reconstraining parameters if warning is True. TODO: find out which parameters have changed specifically """ if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index def _remove_from_index_operations(self, which, transforms): """ Helper preventing copy code. Remove given what (transform prior etc) from which param index ops. """ if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
remove_index_operation
identifier_name
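The fourth row's fim_type, identifier_name, masks only the name being defined: the prefix ends on a bare def, the middle is the identifier remove_index_operation, and the suffix supplies the parameter list and body. Reflowed from those cells (delitem comes from operator, imported at the top of the file), the method being named reads:

def remove_index_operation(self, name):  # "remove_index_operation" is the masked middle
    if name in self._index_operations:
        delitem(self._index_operations, name)
    else:
        raise AttributeError("No index operation with the name {}".format(name))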
parser.go
/* Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package gonids implements a basic parser of IDS rules. // // For now the parser is very basic and it only parses a subset of fields. // We intentionally omit http_encode as it doesn't seem to be used in practice. package gonids import ( "encoding/hex" "errors" "fmt" "net" "regexp" "strconv" "strings" ) // hexRE matches on hexadecimal content like |41 41 41| for example. var hexRE = regexp.MustCompile(`(?i)(\|(?:\s*[a-f0-9]{2}\s*)+\|)`) // escapeRE matches char that needs to escaped in regexp. var escapeRE = regexp.MustCompile(`([()+.'\\])`) // escapeContent matches escaped special characters. var escapeContent = regexp.MustCompile(`\\([\\;":])`) // metaSplitRE matches string in metadata. var metaSplitRE = regexp.MustCompile(`,\s*`) // nestedNetRE matches nested network groups. var nestedNetRE = regexp.MustCompile(`,(!?\[[^]]*\])`) // portSplitRE splits port lists and ranges for validation. var portSplitRE = regexp.MustCompile(`[:,]`) var appLayerProtocols = []string{ "dcerpc", "dhcp", "dnp3", "dns", "enip", "ftp", "ftp-data", "http", "http2", "icmp", "icmpv4", "icmpv6", "ikev2", "imap", "ip", "ip4", "ip6", "ipv4", "ipv6", "irc", "jabber", "krb5", "modbus", "mqtt", "nfs", "ntp", "pkthdr", "rdp", "rfb", "sctp", "sip", "smb", "smtp", "snmp", "ssh", "tcp", "tcp-pkt", "tcp-stream", "tftp", "tls", "udp", } // parseContent decodes rule content match. For now it only takes care of escaped and hex // encoded content. func parseContent(content string) ([]byte, error) { // Decode and replace all occurrences of hexadecimal content. var errpanic error defer func() { r := recover() if r != nil { errpanic = fmt.Errorf("recovered from panic: %v", r) } }() if containsUnescaped(content) { return nil, fmt.Errorf("invalid special characters escaping") } b := escapeContent.ReplaceAllString(content, "$1") b = hexRE.ReplaceAllStringFunc(b, func(h string) string { r, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1)) if err != nil { panic("invalid hexRE regexp") } return string(r) }) return []byte(b), errpanic } // parsePCRE parses the components of a PCRE. Returns PCRE struct. func parsePCRE(s string) (*PCRE, error) { c := strings.Count(s, "/") if c < 2 { return nil, fmt.Errorf("all pcre patterns must contain at least 2 '/', found: %d", c) } l := strings.LastIndex(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find options in PCRE") } i := strings.Index(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find start of pattern") } return &PCRE{ Pattern: []byte(s[i+1 : l]), Options: []byte(s[l+1:]), }, nil } // parseLenMatch parses a LenMatch (like urilen). func parseLenMatch(k lenMatchType, s string) (*LenMatch, error) { m := new(LenMatch) m.Kind = k switch { // Simple case, no operators. case !strings.ContainsAny(s, "><"): // Ignore options after ','. 
numTmp := strings.Split(s, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Leading operator, single number. case strings.HasPrefix(s, ">") || strings.HasPrefix(s, "<"): m.Operator = s[0:1] // Strip leading < or >. numTmp := strings.TrimLeft(s, "><") // Ignore options after ','. numTmp = strings.Split(numTmp, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Min/Max center operator. case strings.Contains(s, "<>"): m.Operator = "<>" parts := strings.Split(s, "<>") if len(parts) != 2 { return nil, fmt.Errorf("must have exactly 2 parts for min/max operator. got %d", len(parts)) } var min, max int var err error min, err = strconv.Atoi(strings.TrimSpace(parts[0])) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(parts[0])) } maxTmp := strings.Split(parts[1], ",")[0] max, err = strconv.Atoi(strings.TrimSpace(maxTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(maxTmp)) } // Do stuff to handle options here. m.Min = min m.Max = max } // Parse options: if strings.Contains(s, ",") { opts := strings.Split(s, ",")[1:] for i, o := range opts { opts[i] = strings.TrimSpace(o) } m.Options = opts } return m, nil } func parseBase64Decode(k byteMatchType, s string) (*ByteMatch, error) { if k != b64Decode { return nil, fmt.Errorf("kind %v is not base64_decode", k) } b := new(ByteMatch) b.Kind = k // All options to base64_decode are optional, and specified by their keyword. for _, p := range strings.Split(s, ",") { v := strings.TrimSpace(p) switch { case strings.HasPrefix(v, "bytes"): b.NumBytes = strings.TrimSpace(strings.SplitAfter(v, "bytes")[1]) case strings.HasPrefix(v, "offset"): val := strings.TrimSpace(strings.SplitAfter(v, "offset")[1]) i, err := strconv.Atoi(val) if err != nil { return nil, fmt.Errorf("offset is not an int: %s; %s", val, err) } if i < 1 { return nil, fmt.Errorf("offset must be positive, non-zero values only") } b.Offset = i case strings.HasPrefix(v, "relative"): b.Options = []string{"relative"} } } return b, nil } // parseByteMatch parses a ByteMatch. func parseByteMatch(k byteMatchType, s string) (*ByteMatch, error) { b := new(ByteMatch) b.Kind = k parts := strings.Split(s, ",") // Num bytes is required for all byteMatchType keywords. if len(parts) < 1 { return nil, fmt.Errorf("%s keyword has %d parts", s, len(parts)) } b.NumBytes = strings.TrimSpace(parts[0]) if len(parts) < b.Kind.minLen() { return nil, fmt.Errorf("invalid %s length: %d", b.Kind, len(parts)) } if k == bExtract || k == bJump { // Parse offset. offset, err := strconv.Atoi(strings.TrimSpace(parts[1])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } if k == bExtract { // Parse variable name. name := parts[2] b.Variable = name } if k == bTest { // Parse operator. b.Operator = strings.TrimSpace(parts[1]) // Parse value. Can use a variable. b.Value = strings.TrimSpace(parts[2]) // Parse offset. 
offset, err := strconv.Atoi(strings.TrimSpace(parts[3])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } // The rest of the options, for all types not b64decode for i, l := b.Kind.minLen(), len(parts); i < l; i++ { parts[i] = strings.TrimSpace(parts[i]) b.Options = append(b.Options, parts[i]) } return b, nil } // parseFlowbit parses a flowbit. func parseFlowbit(s string) (*Flowbit, error) { parts := strings.Split(s, ",") if len(parts) < 1 { return nil, fmt.Errorf("couldn't parse flowbit string: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) { return nil, fmt.Errorf("invalid action for flowbit: %s", a) } fb := &Flowbit{ Action: a, } if fb.Action == "noalert" && len(parts) > 1 { return nil, fmt.Errorf("noalert shouldn't have a value") } if len(parts) == 2 { fb.Value = strings.TrimSpace(parts[1]) } return fb, nil } // parseXbit parses an xbit. func parseXbit(s string) (*Xbit, error) { parts := strings.Split(s, ",") // All xbits must have an action, name and track if len(parts) < 3 { return nil, fmt.Errorf("not enough parts for xbits: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) { return nil, fmt.Errorf("invalid action for xbits: %s", a) } xb := &Xbit{ Action: a, Name: strings.TrimSpace(parts[1]), } // Track. t := strings.Fields(parts[2]) if len(t) != 2 { return nil, fmt.Errorf("wrong number of parts for track: %v", t) } if t[0] != "track" { return nil, fmt.Errorf("%s should be 'track'", t[0]) } xb.Track = t[1] // Expire if len(parts) == 4 { e := strings.Fields(parts[3]) if len(e) != 2 { return nil, fmt.Errorf("wrong number of parts for expire: %v", e) } if e[0] != "expire" { return nil, fmt.Errorf("%s should be 'expire'", e[0]) } xb.Expire = e[1] } return xb, nil } // parseFlowint parses a flowint. func parseFlowint(s string) (*Flowint, error) { parts := strings.Split(s, ",") // All flowints must have a name and modifier if len(parts) < 2 { return nil, fmt.Errorf("not enough parts for flowint: %s", s) } // Ensure all actions are of valid type. m := strings.TrimSpace(parts[1]) if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) { return nil, fmt.Errorf("invalid modifier for flowint: %s", m) } fi := &Flowint{ Name: strings.TrimSpace(parts[0]), Modifier: m, } if len(parts) == 3 { fi.Value = strings.TrimSpace(parts[2]) } return fi, nil } // containsUnescaped checks content whether special characters are properly escaped. func containsUnescaped(s string) bool { esc := false for _, b := range s { if esc { switch b { case '\\', ';', '"', ':': esc = false default: return true } } else { switch b { case '\\': esc = true case ';', '"': return true } } } return esc } func unquote(s string) string { if strings.IndexByte(s, '"') < 0 { return s } return strings.Replace(s, `\"`, `"`, -1) } func inSlice(str string, strings []string) bool { for _, k := range strings { if str == k { return true } } return false } // comment decodes a comment (commented rule, or just a comment.) func (r *Rule) comment(key item, l *lexer) error { if key.typ != itemComment { panic("item is not a comment") } if r.Disabled { // ignoring comment for rule with empty action return nil } rule, err := parseRuleAux(key.value, true) // If there was an error this means the comment is not a rule. 
if err != nil { return fmt.Errorf("this is not a rule: %s", err) } // We parsed a rule, this was a comment so set the rule to disabled. rule.Disabled = true // Overwrite the rule we're working on with the recently parsed, disabled rule. *r = *rule return nil } // action decodes an IDS rule option based on its key. func (r *Rule) action(key item, l *lexer) error { if key.typ != itemAction { panic("item is not an action") } if !inSlice(key.value, []string{"alert", "drop", "pass"}) { return fmt.Errorf("invalid action: %v", key.value) } r.Action = key.value return nil } // protocol decodes an IDS rule protocol based on its key. func (r *Rule) protocol(key item, l *lexer) error { if key.typ != itemProtocol { panic("item is not a protocol") } if !inSlice(key.value, appLayerProtocols) { return fmt.Errorf("invalid protocol: %v", key.value) } r.Protocol = key.value return nil } // network decodes an IDS rule network (networks and ports) based on its key. func (r *Rule) network(key item, l *lexer) error { // This is a hack. We use a regexp to replace the outer `,` with `___` // to give us a discrete string to split on, avoiding the inner `,`. // Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` are trimmed. tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]") items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___") // Validate that no items contain spaces. for _, i := range items { if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) { return fmt.Errorf("network component contains spaces: %v", i) } } switch key.typ { case itemSourceAddress: if validNetworks(items) { r.Source.Nets = append(r.Source.Nets, items...) } else { return fmt.Errorf("some or all source ips are invalid: %v", items) } case itemSourcePort: if portsValid(items) { r.Source.Ports = append(r.Source.Ports, items...) } else { return fmt.Errorf("some or all source ports are invalid: %v", items) } case itemDestinationAddress: if validNetworks(items) { r.Destination.Nets = append(r.Destination.Nets, items...) } else { return fmt.Errorf("some or all destination ips are invalid: %v", items) } case itemDestinationPort: if portsValid(items) { r.Destination.Ports = append(r.Destination.Ports, items...) } else { return fmt.Errorf("some or all destination ports are invalid: %v", items) } default: panic("item is not a network component") } return nil } // Validate that every item is between 1 and 65535. func portsValid(p []string) bool
// Validate item is either a valid ip or ip range. func validNetwork(i string) bool { _, _, err := net.ParseCIDR(i) if err == nil { return true } if net.ParseIP(i) != nil { return true } return false } // Validate every item is either a valid ip or ip range. func validNetworks(nets []string) bool { for _, net := range nets { if strings.Count(net, "[") != strings.Count(net, "]") { // unbalanced groups. return false } net = strings.TrimPrefix(net, "!") // If this network is a grouping, check the inner group. if strings.HasPrefix(net, "[") || strings.Contains(net, ",") { if validNetworks(strings.Split(strings.Trim(net, "[]"), ",")) { continue } return false } switch { case net == "any": continue case strings.HasPrefix(net, "$"): continue case !validNetwork(net): return false } } return true } // direction decodes an IDS rule direction based on its key. func (r *Rule) direction(key item, l *lexer) error { if key.typ != itemDirection { panic("item is not a direction") } switch key.value { case "->": r.Bidirectional = false case "<>": r.Bidirectional = true default: return fmt.Errorf("invalid direction operator %q", key.value) } return nil } var dataPosition = pktData // option decodes an IDS rule option based on its key. func (r *Rule) option(key item, l *lexer) error { if key.typ != itemOptionKey { panic("item is not an option key") } switch { // TODO: Many of these simple tags could be factored into nicer structures. case inSlice(key.value, []string{"classtype", "flow", "tag", "priority", "app-layer-protocol", "noalert", "target", "flags", "ipopts", "ip_proto", "geoip", "fragbits", "fragoffset", "tos", "window", "threshold", "detection_filter", "dce_iface", "dce_opnum", "dce_stub_data", "asn1"}): nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no valid value for %s tag", key.value) } if r.Tags == nil { r.Tags = make(map[string]string) } r.Tags[key.value] = nextItem.value case inSlice(key.value, []string{"sameip", "tls.store", "ftpbounce"}): r.Statements = append(r.Statements, key.value) case inSlice(key.value, tlsTags): t := &TLSTag{ Key: key.value, } nextItem := l.nextItem() if nextItem.typ == itemNot { t.Negate = true nextItem = l.nextItem() } t.Value = nextItem.value r.TLSTags = append(r.TLSTags, t) case key.value == "stream_size": nextItem := l.nextItem() parts := strings.Split(nextItem.value, ",") if len(parts) != 3 { return fmt.Errorf("invalid number of parts for stream_size: %d", len(parts)) } num, err := strconv.Atoi(strings.TrimSpace(parts[2])) if err != nil { return fmt.Errorf("comparison number is not an integer: %v", parts[2]) } r.StreamMatch = &StreamCmp{ Direction: parts[0], Operator: parts[1], Number: num, } case key.value == "reference": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for reference") } refs := strings.SplitN(nextItem.value, ",", 2) if len(refs) != 2 { return fmt.Errorf("invalid reference definition: %s", refs) } r.References = append(r.References, &Reference{Type: refs[0], Value: refs[1]}) case key.value == "metadata": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for metadata") } metas := metaSplitRE.Split(nextItem.value, -1) for _, kv := range metas { metaTmp := strings.SplitN(kv, " ", 2) if len(metaTmp) != 2 { return fmt.Errorf("invalid metadata definition: %s", metaTmp) } r.Metas = append(r.Metas, &Metadata{Key: strings.TrimSpace(metaTmp[0]), Value: strings.TrimSpace(metaTmp[1])}) } case key.value == "sid": nextItem := 
l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option sid") } sid, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid sid %s", nextItem.value) } r.SID = sid case key.value == "rev": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option rev") } rev, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid rev %s", nextItem.value) } r.Revision = rev case key.value == "msg": nextItem := l.nextItem() if nextItem.typ != itemOptionValueString { return errors.New("no value for option msg") } r.Description = nextItem.value case isStickyBuffer(key.value): var d DataPos var err error if d, err = StickyBuffer(key.value); err != nil { return err } dataPosition = d case inSlice(key.value, []string{"content", "uricontent"}): nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { c, err := parseContent(nextItem.value) if err != nil { return err } var options []*ContentOption if key.value == "uricontent" { options = append(options, &ContentOption{Name: "http_uri"}) } con := &Content{ DataPosition: dataPosition, Pattern: c, Negate: negate, Options: options, } r.Matchers = append(r.Matchers, con) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, []string{"http_cookie", "http_raw_cookie", "http_method", "http_header", "http_raw_header", "http_uri", "http_raw_uri", "http_user_agent", "http_stat_code", "http_stat_msg", "http_client_body", "http_server_body", "http_host", "nocase", "rawbytes", "startswith", "endswith"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value}) case inSlice(key.value, []string{"depth", "distance", "offset", "within"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no value for content option %s", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value, Value: nextItem.value}) case key.value == "fast_pattern": lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } var ( only bool offset int length int ) nextItem := l.nextItem() if nextItem.typ == itemOptionValue { v := nextItem.value switch { case v == "only": only = true case strings.Contains(v, ","): s := strings.Split(v, ",") i, err := strconv.Atoi(s[0]) if err != nil { return fmt.Errorf("fast_pattern offset is not an int: %s; %s", s[0], err) } offset = i i, err = strconv.Atoi(s[1]) if err != nil { return fmt.Errorf("fast_pattern length is not an int: %s; %s", s[1], err) } length = i } } lastContent.FastPattern = FastPattern{true, only, offset, length} case key.value == "pcre": nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { p, err := parsePCRE(unquote(nextItem.value)) if err != nil { return err } p.DataPosition = dataPosition p.Negate = negate r.Matchers = append(r.Matchers, p) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, 
allbyteMatchTypeNames()): k, err := byteMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a supported byteMatchType keyword", key.value) } // Handle negation logic here, don't want to pass lexer to parseByteMatch. nextItem := l.nextItem() var negate bool if k == isDataAt && nextItem.typ == itemNot { negate = true nextItem = l.nextItem() } var b *ByteMatch // Parse base64_decode differently as it has odd semantics. if k == b64Decode { b, err = parseBase64Decode(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse base64Decode: %v", err) } // base64_decode allows NumBytes to be empty, an int or a variable. if i, err := strconv.Atoi(b.NumBytes); err != nil && b.NumBytes != "" { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } else if i < 1 { return fmt.Errorf("bytes must be positive, non-zero values only: %d", i) } } } else { b, err = parseByteMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse byteMatch: %v", err) } if _, err := strconv.Atoi(b.NumBytes); err != nil { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } } } b.Negate = negate r.Matchers = append(r.Matchers, b) case inSlice(key.value, allLenMatchTypeNames()): k, err := lenMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a support lenMatch keyword", key.value) } nextItem := l.nextItem() m, err := parseLenMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse LenMatch: %v", err) } m.DataPosition = dataPosition r.Matchers = append(r.Matchers, m) case key.value == "flowbits": nextItem := l.nextItem() fb, err := parseFlowbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowbit: %v", err) } r.Flowbits = append(r.Flowbits, fb) case key.value == "xbits": nextItem := l.nextItem() xb, err := parseXbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing xbits: %v", err) } r.Xbits = append(r.Xbits, xb) case key.value == "flowint": nextItem := l.nextItem() fi, err := parseFlowint(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowint: %v", err) } r.Flowints = append(r.Flowints, fi) default: return &UnsupportedOptionError{ Options: []string{key.value}, } } return nil } // UnsupportedOptionError contains a partially parsed rule, and the options that aren't // supported for parsing. type UnsupportedOptionError struct { Rule *Rule Options []string } // Error returns a string for UnsupportedOptionError func (uoe *UnsupportedOptionError) Error() string { return fmt.Sprintf("rule contains unsupported option(s): %s", strings.Join(uoe.Options, ",")) } // parseRuleAux parses an IDS rule, optionally ignoring comments. func parseRuleAux(rule string, commented bool) (*Rule, error) { l, err := lex(rule) if err != nil { return nil, err } defer l.close() dataPosition = pktData r := &Rule{} var unsupportedOptions = make([]string, 0, 3) for item := l.nextItem(); item.typ != itemEOR && item.typ != itemEOF && err == nil; item = l.nextItem() { switch item.typ { case itemComment: if r.Action != "" || commented { // Ignore comment ending rule. return r, nil } err = r.comment(item, l) // Error here means that the comment was not a commented rule. // So we're not parsing a rule and we need to break out. 
if err != nil { break } // This line was a commented rule. return r, nil case itemAction: err = r.action(item, l) case itemProtocol: err = r.protocol(item, l) case itemSourceAddress, itemDestinationAddress, itemSourcePort, itemDestinationPort: err = r.network(item, l) case itemDirection: err = r.direction(item, l) case itemOptionKey: err = r.option(item, l) // We will continue to parse a rule with unsupported options. if uerr, ok := err.(*UnsupportedOptionError); ok { unsupportedOptions = append(unsupportedOptions, uerr.Options...) // This is ugly but allows the parsing to continue. err = nil } case itemError: err = errors.New(item.value) } // Unrecoverable parse error. if err != nil { return nil, err } } // If we encountered one or more unsupported keys, return an UnsupportedOptionError. if len(unsupportedOptions) > 0 { return nil, &UnsupportedOptionError{ Rule: r, Options: unsupportedOptions, } } return r, nil } // ParseRule parses an IDS rule and returns a struct describing the rule. func ParseRule(rule string) (*Rule, error) { return parseRuleAux(rule, false) }
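// A minimal usage sketch for the exported ParseRule API defined above. The sample
// rule string, the main-package wrapper and the "github.com/google/gonids" import
// path are assumptions for illustration; only ParseRule, the Rule fields and
// UnsupportedOptionError come from the parser itself.
package main

import (
	"fmt"
	"log"

	"github.com/google/gonids"
)

func main() {
	raw := `alert http $HOME_NET any -> $EXTERNAL_NET any (msg:"example rule"; content:"GET"; sid:1000001; rev:1;)`

	r, err := gonids.ParseRule(raw)
	if err != nil {
		// Unsupported options are reported via *UnsupportedOptionError, which
		// still carries the partially parsed rule.
		if uerr, ok := err.(*gonids.UnsupportedOptionError); ok {
			log.Printf("parsed with unsupported options: %v", uerr.Options)
			r = uerr.Rule
		} else {
			log.Fatalf("parse failed: %v", err)
		}
	}
	fmt.Println(r.Action, r.Protocol, r.SID, r.Revision, r.Description)
}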
{ for _, u := range p { if strings.Count(u, "[") != strings.Count(u, "]") { // unbalanced groups. return false } u = strings.TrimPrefix(u, "!") // If this port range is a grouping, check the inner group. if strings.HasPrefix(u, "[") { if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) { continue } return false } ports := portSplitRE.Split(u, -1) for _, port := range ports { port = strings.TrimPrefix(port, "!") if port == "any" || port == "" || strings.HasPrefix(port, "$") { continue } x, err := strconv.Atoi(port) if err != nil { return false } if x > 65535 || x < 0 { return false } } } return true }
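// A rough in-package test sketch for the portsValid helper whose body appears
// above; the helper is unexported, so this assumes it lives in the same gonids
// package. The cases mirror the behaviour of the code shown, not an external spec.
package gonids

import "testing"

func TestPortsValidSketch(t *testing.T) {
	cases := []struct {
		in   []string
		want bool
	}{
		{[]string{"any"}, true},              // wildcard port
		{[]string{"80", "1024:65535"}, true}, // single port and a range
		{[]string{"!80"}, true},              // leading negation is stripped first
		{[]string{"$HTTP_PORTS"}, true},      // variables pass through unchecked
		{[]string{"[80,443]"}, true},         // groups are validated recursively
		{[]string{"70000"}, false},           // above 65535
		{[]string{"http"}, false},            // not numeric
	}
	for _, c := range cases {
		if got := portsValid(c.in); got != c.want {
			t.Errorf("portsValid(%v) = %v, want %v", c.in, got, c.want)
		}
	}
}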
identifier_body
parser.go
/* Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package gonids implements a basic parser of IDS rules. // // For now the parser is very basic and it only parses a subset of fields. // We intentionally omit http_encode as it doesn't seem to be used in practice. package gonids import ( "encoding/hex" "errors" "fmt" "net" "regexp" "strconv" "strings" ) // hexRE matches on hexadecimal content like |41 41 41| for example. var hexRE = regexp.MustCompile(`(?i)(\|(?:\s*[a-f0-9]{2}\s*)+\|)`) // escapeRE matches char that needs to escaped in regexp. var escapeRE = regexp.MustCompile(`([()+.'\\])`) // escapeContent matches escaped special characters. var escapeContent = regexp.MustCompile(`\\([\\;":])`) // metaSplitRE matches string in metadata. var metaSplitRE = regexp.MustCompile(`,\s*`) // nestedNetRE matches nested network groups. var nestedNetRE = regexp.MustCompile(`,(!?\[[^]]*\])`) // portSplitRE splits port lists and ranges for validation. var portSplitRE = regexp.MustCompile(`[:,]`) var appLayerProtocols = []string{ "dcerpc", "dhcp", "dnp3", "dns", "enip", "ftp", "ftp-data", "http", "http2", "icmp", "icmpv4", "icmpv6", "ikev2", "imap", "ip", "ip4", "ip6", "ipv4", "ipv6", "irc", "jabber", "krb5", "modbus", "mqtt", "nfs", "ntp", "pkthdr", "rdp", "rfb", "sctp", "sip", "smb", "smtp", "snmp", "ssh", "tcp", "tcp-pkt", "tcp-stream", "tftp", "tls", "udp", } // parseContent decodes rule content match. For now it only takes care of escaped and hex // encoded content. func
(content string) ([]byte, error) { // Decode and replace all occurrences of hexadecimal content. var errpanic error defer func() { r := recover() if r != nil { errpanic = fmt.Errorf("recovered from panic: %v", r) } }() if containsUnescaped(content) { return nil, fmt.Errorf("invalid special characters escaping") } b := escapeContent.ReplaceAllString(content, "$1") b = hexRE.ReplaceAllStringFunc(b, func(h string) string { r, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1)) if err != nil { panic("invalid hexRE regexp") } return string(r) }) return []byte(b), errpanic } // parsePCRE parses the components of a PCRE. Returns PCRE struct. func parsePCRE(s string) (*PCRE, error) { c := strings.Count(s, "/") if c < 2 { return nil, fmt.Errorf("all pcre patterns must contain at least 2 '/', found: %d", c) } l := strings.LastIndex(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find options in PCRE") } i := strings.Index(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find start of pattern") } return &PCRE{ Pattern: []byte(s[i+1 : l]), Options: []byte(s[l+1:]), }, nil } // parseLenMatch parses a LenMatch (like urilen). func parseLenMatch(k lenMatchType, s string) (*LenMatch, error) { m := new(LenMatch) m.Kind = k switch { // Simple case, no operators. case !strings.ContainsAny(s, "><"): // Ignore options after ','. numTmp := strings.Split(s, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Leading operator, single number. case strings.HasPrefix(s, ">") || strings.HasPrefix(s, "<"): m.Operator = s[0:1] // Strip leading < or >. numTmp := strings.TrimLeft(s, "><") // Ignore options after ','. numTmp = strings.Split(numTmp, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Min/Max center operator. case strings.Contains(s, "<>"): m.Operator = "<>" parts := strings.Split(s, "<>") if len(parts) != 2 { return nil, fmt.Errorf("must have exactly 2 parts for min/max operator. got %d", len(parts)) } var min, max int var err error min, err = strconv.Atoi(strings.TrimSpace(parts[0])) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(parts[0])) } maxTmp := strings.Split(parts[1], ",")[0] max, err = strconv.Atoi(strings.TrimSpace(maxTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(maxTmp)) } // Do stuff to handle options here. m.Min = min m.Max = max } // Parse options: if strings.Contains(s, ",") { opts := strings.Split(s, ",")[1:] for i, o := range opts { opts[i] = strings.TrimSpace(o) } m.Options = opts } return m, nil } func parseBase64Decode(k byteMatchType, s string) (*ByteMatch, error) { if k != b64Decode { return nil, fmt.Errorf("kind %v is not base64_decode", k) } b := new(ByteMatch) b.Kind = k // All options to base64_decode are optional, and specified by their keyword. 
for _, p := range strings.Split(s, ",") { v := strings.TrimSpace(p) switch { case strings.HasPrefix(v, "bytes"): b.NumBytes = strings.TrimSpace(strings.SplitAfter(v, "bytes")[1]) case strings.HasPrefix(v, "offset"): val := strings.TrimSpace(strings.SplitAfter(v, "offset")[1]) i, err := strconv.Atoi(val) if err != nil { return nil, fmt.Errorf("offset is not an int: %s; %s", val, err) } if i < 1 { return nil, fmt.Errorf("offset must be positive, non-zero values only") } b.Offset = i case strings.HasPrefix(v, "relative"): b.Options = []string{"relative"} } } return b, nil } // parseByteMatch parses a ByteMatch. func parseByteMatch(k byteMatchType, s string) (*ByteMatch, error) { b := new(ByteMatch) b.Kind = k parts := strings.Split(s, ",") // Num bytes is required for all byteMatchType keywords. if len(parts) < 1 { return nil, fmt.Errorf("%s keyword has %d parts", s, len(parts)) } b.NumBytes = strings.TrimSpace(parts[0]) if len(parts) < b.Kind.minLen() { return nil, fmt.Errorf("invalid %s length: %d", b.Kind, len(parts)) } if k == bExtract || k == bJump { // Parse offset. offset, err := strconv.Atoi(strings.TrimSpace(parts[1])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } if k == bExtract { // Parse variable name. name := parts[2] b.Variable = name } if k == bTest { // Parse operator. b.Operator = strings.TrimSpace(parts[1]) // Parse value. Can use a variable. b.Value = strings.TrimSpace(parts[2]) // Parse offset. offset, err := strconv.Atoi(strings.TrimSpace(parts[3])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } // The rest of the options, for all types not b64decode for i, l := b.Kind.minLen(), len(parts); i < l; i++ { parts[i] = strings.TrimSpace(parts[i]) b.Options = append(b.Options, parts[i]) } return b, nil } // parseFlowbit parses a flowbit. func parseFlowbit(s string) (*Flowbit, error) { parts := strings.Split(s, ",") if len(parts) < 1 { return nil, fmt.Errorf("couldn't parse flowbit string: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) { return nil, fmt.Errorf("invalid action for flowbit: %s", a) } fb := &Flowbit{ Action: a, } if fb.Action == "noalert" && len(parts) > 1 { return nil, fmt.Errorf("noalert shouldn't have a value") } if len(parts) == 2 { fb.Value = strings.TrimSpace(parts[1]) } return fb, nil } // parseXbit parses an xbit. func parseXbit(s string) (*Xbit, error) { parts := strings.Split(s, ",") // All xbits must have an action, name and track if len(parts) < 3 { return nil, fmt.Errorf("not enough parts for xbits: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) { return nil, fmt.Errorf("invalid action for xbits: %s", a) } xb := &Xbit{ Action: a, Name: strings.TrimSpace(parts[1]), } // Track. t := strings.Fields(parts[2]) if len(t) != 2 { return nil, fmt.Errorf("wrong number of parts for track: %v", t) } if t[0] != "track" { return nil, fmt.Errorf("%s should be 'track'", t[0]) } xb.Track = t[1] // Expire if len(parts) == 4 { e := strings.Fields(parts[3]) if len(e) != 2 { return nil, fmt.Errorf("wrong number of parts for expire: %v", e) } if e[0] != "expire" { return nil, fmt.Errorf("%s should be 'expire'", e[0]) } xb.Expire = e[1] } return xb, nil } // parseFlowint parses a flowint. 
func parseFlowint(s string) (*Flowint, error) { parts := strings.Split(s, ",") // All flowints must have a name and modifier if len(parts) < 2 { return nil, fmt.Errorf("not enough parts for flowint: %s", s) } // Ensure all actions are of valid type. m := strings.TrimSpace(parts[1]) if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) { return nil, fmt.Errorf("invalid modifier for flowint: %s", m) } fi := &Flowint{ Name: strings.TrimSpace(parts[0]), Modifier: m, } if len(parts) == 3 { fi.Value = strings.TrimSpace(parts[2]) } return fi, nil } // containsUnescaped checks content whether special characters are properly escaped. func containsUnescaped(s string) bool { esc := false for _, b := range s { if esc { switch b { case '\\', ';', '"', ':': esc = false default: return true } } else { switch b { case '\\': esc = true case ';', '"': return true } } } return esc } func unquote(s string) string { if strings.IndexByte(s, '"') < 0 { return s } return strings.Replace(s, `\"`, `"`, -1) } func inSlice(str string, strings []string) bool { for _, k := range strings { if str == k { return true } } return false } // comment decodes a comment (commented rule, or just a comment.) func (r *Rule) comment(key item, l *lexer) error { if key.typ != itemComment { panic("item is not a comment") } if r.Disabled { // ignoring comment for rule with empty action return nil } rule, err := parseRuleAux(key.value, true) // If there was an error this means the comment is not a rule. if err != nil { return fmt.Errorf("this is not a rule: %s", err) } // We parsed a rule, this was a comment so set the rule to disabled. rule.Disabled = true // Overwrite the rule we're working on with the recently parsed, disabled rule. *r = *rule return nil } // action decodes an IDS rule option based on its key. func (r *Rule) action(key item, l *lexer) error { if key.typ != itemAction { panic("item is not an action") } if !inSlice(key.value, []string{"alert", "drop", "pass"}) { return fmt.Errorf("invalid action: %v", key.value) } r.Action = key.value return nil } // protocol decodes an IDS rule protocol based on its key. func (r *Rule) protocol(key item, l *lexer) error { if key.typ != itemProtocol { panic("item is not a protocol") } if !inSlice(key.value, appLayerProtocols) { return fmt.Errorf("invalid protocol: %v", key.value) } r.Protocol = key.value return nil } // network decodes an IDS rule network (networks and ports) based on its key. func (r *Rule) network(key item, l *lexer) error { // This is a hack. We use a regexp to replace the outer `,` with `___` // to give us a discrete string to split on, avoiding the inner `,`. // Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` are trimmed. tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]") items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___") // Validate that no items contain spaces. for _, i := range items { if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) { return fmt.Errorf("network component contains spaces: %v", i) } } switch key.typ { case itemSourceAddress: if validNetworks(items) { r.Source.Nets = append(r.Source.Nets, items...) } else { return fmt.Errorf("some or all source ips are invalid: %v", items) } case itemSourcePort: if portsValid(items) { r.Source.Ports = append(r.Source.Ports, items...) 
} else { return fmt.Errorf("some or all source ports are invalid: %v", items) } case itemDestinationAddress: if validNetworks(items) { r.Destination.Nets = append(r.Destination.Nets, items...) } else { return fmt.Errorf("some or all destination ips are invalid: %v", items) } case itemDestinationPort: if portsValid(items) { r.Destination.Ports = append(r.Destination.Ports, items...) } else { return fmt.Errorf("some or all destination ports are invalid: %v", items) } default: panic("item is not a network component") } return nil } // Validate that every item is between 1 and 65535. func portsValid(p []string) bool { for _, u := range p { if strings.Count(u, "[") != strings.Count(u, "]") { // unbalanced groups. return false } u = strings.TrimPrefix(u, "!") // If this port range is a grouping, check the inner group. if strings.HasPrefix(u, "[") { if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) { continue } return false } ports := portSplitRE.Split(u, -1) for _, port := range ports { port = strings.TrimPrefix(port, "!") if port == "any" || port == "" || strings.HasPrefix(port, "$") { continue } x, err := strconv.Atoi(port) if err != nil { return false } if x > 65535 || x < 0 { return false } } } return true } // Validate item is either a valid ip or ip range. func validNetwork(i string) bool { _, _, err := net.ParseCIDR(i) if err == nil { return true } if net.ParseIP(i) != nil { return true } return false } // Validate every item is either a valid ip or ip range. func validNetworks(nets []string) bool { for _, net := range nets { if strings.Count(net, "[") != strings.Count(net, "]") { // unbalanced groups. return false } net = strings.TrimPrefix(net, "!") // If this network is a grouping, check the inner group. if strings.HasPrefix(net, "[") || strings.Contains(net, ",") { if validNetworks(strings.Split(strings.Trim(net, "[]"), ",")) { continue } return false } switch { case net == "any": continue case strings.HasPrefix(net, "$"): continue case !validNetwork(net): return false } } return true } // direction decodes an IDS rule direction based on its key. func (r *Rule) direction(key item, l *lexer) error { if key.typ != itemDirection { panic("item is not a direction") } switch key.value { case "->": r.Bidirectional = false case "<>": r.Bidirectional = true default: return fmt.Errorf("invalid direction operator %q", key.value) } return nil } var dataPosition = pktData // option decodes an IDS rule option based on its key. func (r *Rule) option(key item, l *lexer) error { if key.typ != itemOptionKey { panic("item is not an option key") } switch { // TODO: Many of these simple tags could be factored into nicer structures. 
case inSlice(key.value, []string{"classtype", "flow", "tag", "priority", "app-layer-protocol", "noalert", "target", "flags", "ipopts", "ip_proto", "geoip", "fragbits", "fragoffset", "tos", "window", "threshold", "detection_filter", "dce_iface", "dce_opnum", "dce_stub_data", "asn1"}): nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no valid value for %s tag", key.value) } if r.Tags == nil { r.Tags = make(map[string]string) } r.Tags[key.value] = nextItem.value case inSlice(key.value, []string{"sameip", "tls.store", "ftpbounce"}): r.Statements = append(r.Statements, key.value) case inSlice(key.value, tlsTags): t := &TLSTag{ Key: key.value, } nextItem := l.nextItem() if nextItem.typ == itemNot { t.Negate = true nextItem = l.nextItem() } t.Value = nextItem.value r.TLSTags = append(r.TLSTags, t) case key.value == "stream_size": nextItem := l.nextItem() parts := strings.Split(nextItem.value, ",") if len(parts) != 3 { return fmt.Errorf("invalid number of parts for stream_size: %d", len(parts)) } num, err := strconv.Atoi(strings.TrimSpace(parts[2])) if err != nil { return fmt.Errorf("comparison number is not an integer: %v", parts[2]) } r.StreamMatch = &StreamCmp{ Direction: parts[0], Operator: parts[1], Number: num, } case key.value == "reference": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for reference") } refs := strings.SplitN(nextItem.value, ",", 2) if len(refs) != 2 { return fmt.Errorf("invalid reference definition: %s", refs) } r.References = append(r.References, &Reference{Type: refs[0], Value: refs[1]}) case key.value == "metadata": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for metadata") } metas := metaSplitRE.Split(nextItem.value, -1) for _, kv := range metas { metaTmp := strings.SplitN(kv, " ", 2) if len(metaTmp) != 2 { return fmt.Errorf("invalid metadata definition: %s", metaTmp) } r.Metas = append(r.Metas, &Metadata{Key: strings.TrimSpace(metaTmp[0]), Value: strings.TrimSpace(metaTmp[1])}) } case key.value == "sid": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option sid") } sid, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid sid %s", nextItem.value) } r.SID = sid case key.value == "rev": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option rev") } rev, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid rev %s", nextItem.value) } r.Revision = rev case key.value == "msg": nextItem := l.nextItem() if nextItem.typ != itemOptionValueString { return errors.New("no value for option msg") } r.Description = nextItem.value case isStickyBuffer(key.value): var d DataPos var err error if d, err = StickyBuffer(key.value); err != nil { return err } dataPosition = d case inSlice(key.value, []string{"content", "uricontent"}): nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { c, err := parseContent(nextItem.value) if err != nil { return err } var options []*ContentOption if key.value == "uricontent" { options = append(options, &ContentOption{Name: "http_uri"}) } con := &Content{ DataPosition: dataPosition, Pattern: c, Negate: negate, Options: options, } r.Matchers = append(r.Matchers, con) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, 
[]string{"http_cookie", "http_raw_cookie", "http_method", "http_header", "http_raw_header", "http_uri", "http_raw_uri", "http_user_agent", "http_stat_code", "http_stat_msg", "http_client_body", "http_server_body", "http_host", "nocase", "rawbytes", "startswith", "endswith"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value}) case inSlice(key.value, []string{"depth", "distance", "offset", "within"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no value for content option %s", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value, Value: nextItem.value}) case key.value == "fast_pattern": lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } var ( only bool offset int length int ) nextItem := l.nextItem() if nextItem.typ == itemOptionValue { v := nextItem.value switch { case v == "only": only = true case strings.Contains(v, ","): s := strings.Split(v, ",") i, err := strconv.Atoi(s[0]) if err != nil { return fmt.Errorf("fast_pattern offset is not an int: %s; %s", s[0], err) } offset = i i, err = strconv.Atoi(s[1]) if err != nil { return fmt.Errorf("fast_pattern length is not an int: %s; %s", s[1], err) } length = i } } lastContent.FastPattern = FastPattern{true, only, offset, length} case key.value == "pcre": nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { p, err := parsePCRE(unquote(nextItem.value)) if err != nil { return err } p.DataPosition = dataPosition p.Negate = negate r.Matchers = append(r.Matchers, p) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, allbyteMatchTypeNames()): k, err := byteMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a supported byteMatchType keyword", key.value) } // Handle negation logic here, don't want to pass lexer to parseByteMatch. nextItem := l.nextItem() var negate bool if k == isDataAt && nextItem.typ == itemNot { negate = true nextItem = l.nextItem() } var b *ByteMatch // Parse base64_decode differently as it has odd semantics. if k == b64Decode { b, err = parseBase64Decode(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse base64Decode: %v", err) } // base64_decode allows NumBytes to be empty, an int or a variable. if i, err := strconv.Atoi(b.NumBytes); err != nil && b.NumBytes != "" { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } else if i < 1 { return fmt.Errorf("bytes must be positive, non-zero values only: %d", i) } } } else { b, err = parseByteMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse byteMatch: %v", err) } if _, err := strconv.Atoi(b.NumBytes); err != nil { // NumBytes is not an int, check if it is a variable from byte_extract. 
if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } } } b.Negate = negate r.Matchers = append(r.Matchers, b) case inSlice(key.value, allLenMatchTypeNames()): k, err := lenMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a support lenMatch keyword", key.value) } nextItem := l.nextItem() m, err := parseLenMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse LenMatch: %v", err) } m.DataPosition = dataPosition r.Matchers = append(r.Matchers, m) case key.value == "flowbits": nextItem := l.nextItem() fb, err := parseFlowbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowbit: %v", err) } r.Flowbits = append(r.Flowbits, fb) case key.value == "xbits": nextItem := l.nextItem() xb, err := parseXbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing xbits: %v", err) } r.Xbits = append(r.Xbits, xb) case key.value == "flowint": nextItem := l.nextItem() fi, err := parseFlowint(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowint: %v", err) } r.Flowints = append(r.Flowints, fi) default: return &UnsupportedOptionError{ Options: []string{key.value}, } } return nil } // UnsupportedOptionError contains a partially parsed rule, and the options that aren't // supported for parsing. type UnsupportedOptionError struct { Rule *Rule Options []string } // Error returns a string for UnsupportedOptionError func (uoe *UnsupportedOptionError) Error() string { return fmt.Sprintf("rule contains unsupported option(s): %s", strings.Join(uoe.Options, ",")) } // parseRuleAux parses an IDS rule, optionally ignoring comments. func parseRuleAux(rule string, commented bool) (*Rule, error) { l, err := lex(rule) if err != nil { return nil, err } defer l.close() dataPosition = pktData r := &Rule{} var unsupportedOptions = make([]string, 0, 3) for item := l.nextItem(); item.typ != itemEOR && item.typ != itemEOF && err == nil; item = l.nextItem() { switch item.typ { case itemComment: if r.Action != "" || commented { // Ignore comment ending rule. return r, nil } err = r.comment(item, l) // Error here means that the comment was not a commented rule. // So we're not parsing a rule and we need to break out. if err != nil { break } // This line was a commented rule. return r, nil case itemAction: err = r.action(item, l) case itemProtocol: err = r.protocol(item, l) case itemSourceAddress, itemDestinationAddress, itemSourcePort, itemDestinationPort: err = r.network(item, l) case itemDirection: err = r.direction(item, l) case itemOptionKey: err = r.option(item, l) // We will continue to parse a rule with unsupported options. if uerr, ok := err.(*UnsupportedOptionError); ok { unsupportedOptions = append(unsupportedOptions, uerr.Options...) // This is ugly but allows the parsing to continue. err = nil } case itemError: err = errors.New(item.value) } // Unrecoverable parse error. if err != nil { return nil, err } } // If we encountered one or more unsupported keys, return an UnsupportedOptionError. if len(unsupportedOptions) > 0 { return nil, &UnsupportedOptionError{ Rule: r, Options: unsupportedOptions, } } return r, nil } // ParseRule parses an IDS rule and returns a struct describing the rule. func ParseRule(rule string) (*Rule, error) { return parseRuleAux(rule, false) }
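// Another usage sketch under the same assumptions as before (illustrative rule
// string, assumed import path); it shows how reference and metadata options end
// up on the parsed rule, using the References and Metas fields populated above.
package main

import (
	"fmt"
	"log"

	"github.com/google/gonids"
)

func main() {
	raw := `alert tcp any any -> any 443 (msg:"meta example"; ` +
		`reference:url,example.com/advisory; ` +
		`metadata:created_at 2016_01_01, updated_at 2016_06_01; sid:2; rev:3;)`

	r, err := gonids.ParseRule(raw)
	if err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	for _, ref := range r.References {
		fmt.Printf("reference %s -> %s\n", ref.Type, ref.Value)
	}
	for _, m := range r.Metas {
		fmt.Printf("metadata %s = %s\n", m.Key, m.Value)
	}
}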
parseContent
identifier_name
parser.go
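// An in-package sketch of the unexported parseContent helper named just above;
// it illustrates hex-pipe decoding and escape handling on a made-up input and
// assumes it sits in the same gonids package.
package gonids

import "fmt"

func contentSketch() {
	// |41 41 41| is hex for "AAA"; \; is an escaped semicolon.
	b, err := parseContent(`GET |41 41 41|\;`)
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(string(b)) // expected to print: GET AAA;
}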
/* Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package gonids implements a basic parser of IDS rules. // // For now the parser is very basic and it only parses a subset of fields. // We intentionally omit http_encode as it doesn't seem to be used in practice. package gonids import ( "encoding/hex" "errors" "fmt" "net" "regexp" "strconv" "strings" ) // hexRE matches on hexadecimal content like |41 41 41| for example. var hexRE = regexp.MustCompile(`(?i)(\|(?:\s*[a-f0-9]{2}\s*)+\|)`) // escapeRE matches char that needs to escaped in regexp. var escapeRE = regexp.MustCompile(`([()+.'\\])`) // escapeContent matches escaped special characters. var escapeContent = regexp.MustCompile(`\\([\\;":])`) // metaSplitRE matches string in metadata. var metaSplitRE = regexp.MustCompile(`,\s*`) // nestedNetRE matches nested network groups. var nestedNetRE = regexp.MustCompile(`,(!?\[[^]]*\])`) // portSplitRE splits port lists and ranges for validation. var portSplitRE = regexp.MustCompile(`[:,]`) var appLayerProtocols = []string{ "dcerpc", "dhcp", "dnp3", "dns", "enip", "ftp", "ftp-data", "http", "http2", "icmp", "icmpv4", "icmpv6", "ikev2", "imap", "ip", "ip4", "ip6", "ipv4", "ipv6", "irc", "jabber", "krb5", "modbus", "mqtt", "nfs", "ntp", "pkthdr", "rdp", "rfb", "sctp", "sip", "smb", "smtp", "snmp", "ssh", "tcp", "tcp-pkt", "tcp-stream", "tftp", "tls", "udp", } // parseContent decodes rule content match. For now it only takes care of escaped and hex // encoded content. func parseContent(content string) ([]byte, error) { // Decode and replace all occurrences of hexadecimal content. var errpanic error defer func() { r := recover() if r != nil { errpanic = fmt.Errorf("recovered from panic: %v", r) } }() if containsUnescaped(content) { return nil, fmt.Errorf("invalid special characters escaping") } b := escapeContent.ReplaceAllString(content, "$1") b = hexRE.ReplaceAllStringFunc(b, func(h string) string { r, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1)) if err != nil { panic("invalid hexRE regexp") } return string(r) }) return []byte(b), errpanic } // parsePCRE parses the components of a PCRE. Returns PCRE struct. func parsePCRE(s string) (*PCRE, error) { c := strings.Count(s, "/") if c < 2 { return nil, fmt.Errorf("all pcre patterns must contain at least 2 '/', found: %d", c) } l := strings.LastIndex(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find options in PCRE") } i := strings.Index(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find start of pattern") } return &PCRE{ Pattern: []byte(s[i+1 : l]), Options: []byte(s[l+1:]), }, nil } // parseLenMatch parses a LenMatch (like urilen). func parseLenMatch(k lenMatchType, s string) (*LenMatch, error) { m := new(LenMatch) m.Kind = k switch { // Simple case, no operators. case !strings.ContainsAny(s, "><"): // Ignore options after ','. 
numTmp := strings.Split(s, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Leading operator, single number. case strings.HasPrefix(s, ">") || strings.HasPrefix(s, "<"): m.Operator = s[0:1] // Strip leading < or >. numTmp := strings.TrimLeft(s, "><") // Ignore options after ','. numTmp = strings.Split(numTmp, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Min/Max center operator. case strings.Contains(s, "<>"): m.Operator = "<>" parts := strings.Split(s, "<>") if len(parts) != 2 { return nil, fmt.Errorf("must have exactly 2 parts for min/max operator. got %d", len(parts)) } var min, max int var err error min, err = strconv.Atoi(strings.TrimSpace(parts[0])) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(parts[0])) } maxTmp := strings.Split(parts[1], ",")[0] max, err = strconv.Atoi(strings.TrimSpace(maxTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(maxTmp)) } // Do stuff to handle options here. m.Min = min m.Max = max } // Parse options: if strings.Contains(s, ",") { opts := strings.Split(s, ",")[1:] for i, o := range opts { opts[i] = strings.TrimSpace(o) } m.Options = opts } return m, nil } func parseBase64Decode(k byteMatchType, s string) (*ByteMatch, error) { if k != b64Decode { return nil, fmt.Errorf("kind %v is not base64_decode", k) } b := new(ByteMatch) b.Kind = k // All options to base64_decode are optional, and specified by their keyword. for _, p := range strings.Split(s, ",") { v := strings.TrimSpace(p) switch { case strings.HasPrefix(v, "bytes"): b.NumBytes = strings.TrimSpace(strings.SplitAfter(v, "bytes")[1]) case strings.HasPrefix(v, "offset"): val := strings.TrimSpace(strings.SplitAfter(v, "offset")[1]) i, err := strconv.Atoi(val) if err != nil { return nil, fmt.Errorf("offset is not an int: %s; %s", val, err) } if i < 1 { return nil, fmt.Errorf("offset must be positive, non-zero values only") } b.Offset = i case strings.HasPrefix(v, "relative"): b.Options = []string{"relative"} } } return b, nil } // parseByteMatch parses a ByteMatch. func parseByteMatch(k byteMatchType, s string) (*ByteMatch, error) { b := new(ByteMatch) b.Kind = k parts := strings.Split(s, ",") // Num bytes is required for all byteMatchType keywords. if len(parts) < 1 { return nil, fmt.Errorf("%s keyword has %d parts", s, len(parts)) } b.NumBytes = strings.TrimSpace(parts[0]) if len(parts) < b.Kind.minLen() { return nil, fmt.Errorf("invalid %s length: %d", b.Kind, len(parts)) } if k == bExtract || k == bJump { // Parse offset. offset, err := strconv.Atoi(strings.TrimSpace(parts[1])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } if k == bExtract { // Parse variable name. name := parts[2] b.Variable = name } if k == bTest { // Parse operator. b.Operator = strings.TrimSpace(parts[1]) // Parse value. Can use a variable. b.Value = strings.TrimSpace(parts[2]) // Parse offset. 
offset, err := strconv.Atoi(strings.TrimSpace(parts[3])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } // The rest of the options, for all types not b64decode for i, l := b.Kind.minLen(), len(parts); i < l; i++ { parts[i] = strings.TrimSpace(parts[i]) b.Options = append(b.Options, parts[i]) } return b, nil } // parseFlowbit parses a flowbit. func parseFlowbit(s string) (*Flowbit, error) { parts := strings.Split(s, ",") if len(parts) < 1 { return nil, fmt.Errorf("couldn't parse flowbit string: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) { return nil, fmt.Errorf("invalid action for flowbit: %s", a) } fb := &Flowbit{ Action: a, } if fb.Action == "noalert" && len(parts) > 1 { return nil, fmt.Errorf("noalert shouldn't have a value") } if len(parts) == 2 { fb.Value = strings.TrimSpace(parts[1]) } return fb, nil } // parseXbit parses an xbit. func parseXbit(s string) (*Xbit, error) { parts := strings.Split(s, ",") // All xbits must have an action, name and track if len(parts) < 3 { return nil, fmt.Errorf("not enough parts for xbits: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) { return nil, fmt.Errorf("invalid action for xbits: %s", a) } xb := &Xbit{ Action: a, Name: strings.TrimSpace(parts[1]), } // Track. t := strings.Fields(parts[2]) if len(t) != 2 { return nil, fmt.Errorf("wrong number of parts for track: %v", t) } if t[0] != "track" { return nil, fmt.Errorf("%s should be 'track'", t[0]) } xb.Track = t[1] // Expire if len(parts) == 4 { e := strings.Fields(parts[3]) if len(e) != 2 { return nil, fmt.Errorf("wrong number of parts for expire: %v", e) } if e[0] != "expire" { return nil, fmt.Errorf("%s should be 'expire'", e[0]) } xb.Expire = e[1] } return xb, nil } // parseFlowint parses a flowint. func parseFlowint(s string) (*Flowint, error) { parts := strings.Split(s, ",") // All flowints must have a name and modifier if len(parts) < 2 { return nil, fmt.Errorf("not enough parts for flowint: %s", s) } // Ensure all actions are of valid type. m := strings.TrimSpace(parts[1]) if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) { return nil, fmt.Errorf("invalid modifier for flowint: %s", m) } fi := &Flowint{ Name: strings.TrimSpace(parts[0]), Modifier: m, } if len(parts) == 3 { fi.Value = strings.TrimSpace(parts[2]) } return fi, nil } // containsUnescaped checks content whether special characters are properly escaped. func containsUnescaped(s string) bool { esc := false for _, b := range s { if esc { switch b { case '\\', ';', '"', ':': esc = false default: return true } } else { switch b { case '\\': esc = true case ';', '"': return true } } } return esc } func unquote(s string) string { if strings.IndexByte(s, '"') < 0 { return s } return strings.Replace(s, `\"`, `"`, -1) } func inSlice(str string, strings []string) bool { for _, k := range strings { if str == k { return true } } return false } // comment decodes a comment (commented rule, or just a comment.) func (r *Rule) comment(key item, l *lexer) error { if key.typ != itemComment { panic("item is not a comment") } if r.Disabled { // ignoring comment for rule with empty action return nil } rule, err := parseRuleAux(key.value, true) // If there was an error this means the comment is not a rule. 
if err != nil { return fmt.Errorf("this is not a rule: %s", err) } // We parsed a rule, this was a comment so set the rule to disabled. rule.Disabled = true // Overwrite the rule we're working on with the recently parsed, disabled rule. *r = *rule return nil } // action decodes an IDS rule option based on its key. func (r *Rule) action(key item, l *lexer) error { if key.typ != itemAction { panic("item is not an action") } if !inSlice(key.value, []string{"alert", "drop", "pass"}) { return fmt.Errorf("invalid action: %v", key.value) } r.Action = key.value return nil } // protocol decodes an IDS rule protocol based on its key. func (r *Rule) protocol(key item, l *lexer) error { if key.typ != itemProtocol { panic("item is not a protocol") } if !inSlice(key.value, appLayerProtocols) { return fmt.Errorf("invalid protocol: %v", key.value) } r.Protocol = key.value return nil } // network decodes an IDS rule network (networks and ports) based on its key. func (r *Rule) network(key item, l *lexer) error { // This is a hack. We use a regexp to replace the outer `,` with `___` // to give us a discrete string to split on, avoiding the inner `,`. // Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` are trimmed. tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]") items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___") // Validate that no items contain spaces. for _, i := range items { if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) { return fmt.Errorf("network component contains spaces: %v", i) } } switch key.typ { case itemSourceAddress: if validNetworks(items) { r.Source.Nets = append(r.Source.Nets, items...) } else { return fmt.Errorf("some or all source ips are invalid: %v", items) } case itemSourcePort: if portsValid(items) { r.Source.Ports = append(r.Source.Ports, items...) } else { return fmt.Errorf("some or all source ports are invalid: %v", items) } case itemDestinationAddress: if validNetworks(items) { r.Destination.Nets = append(r.Destination.Nets, items...) } else { return fmt.Errorf("some or all destination ips are invalid: %v", items) } case itemDestinationPort: if portsValid(items) { r.Destination.Ports = append(r.Destination.Ports, items...) } else { return fmt.Errorf("some or all destination ports are invalid: %v", items) } default: panic("item is not a network component") } return nil } // Validate that every item is between 1 and 65535. func portsValid(p []string) bool { for _, u := range p { if strings.Count(u, "[") != strings.Count(u, "]") { // unbalanced groups. return false } u = strings.TrimPrefix(u, "!") // If this port range is a grouping, check the inner group. if strings.HasPrefix(u, "[") { if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) { continue } return false } ports := portSplitRE.Split(u, -1) for _, port := range ports { port = strings.TrimPrefix(port, "!") if port == "any" || port == "" || strings.HasPrefix(port, "$") { continue } x, err := strconv.Atoi(port) if err != nil { return false } if x > 65535 || x < 0 { return false } } } return true } // Validate item is either a valid ip or ip range. func validNetwork(i string) bool { _, _, err := net.ParseCIDR(i) if err == nil { return true } if net.ParseIP(i) != nil { return true } return false } // Validate every item is either a valid ip or ip range. func validNetworks(nets []string) bool { for _, net := range nets { if strings.Count(net, "[") != strings.Count(net, "]") { // unbalanced groups. 
return false } net = strings.TrimPrefix(net, "!") // If this network is a grouping, check the inner group. if strings.HasPrefix(net, "[") || strings.Contains(net, ",") { if validNetworks(strings.Split(strings.Trim(net, "[]"), ",")) { continue } return false } switch { case net == "any": continue case strings.HasPrefix(net, "$"): continue case !validNetwork(net): return false } } return true } // direction decodes an IDS rule direction based on its key. func (r *Rule) direction(key item, l *lexer) error { if key.typ != itemDirection { panic("item is not a direction") } switch key.value { case "->": r.Bidirectional = false case "<>": r.Bidirectional = true default: return fmt.Errorf("invalid direction operator %q", key.value) } return nil } var dataPosition = pktData // option decodes an IDS rule option based on its key. func (r *Rule) option(key item, l *lexer) error { if key.typ != itemOptionKey { panic("item is not an option key") } switch { // TODO: Many of these simple tags could be factored into nicer structures. case inSlice(key.value, []string{"classtype", "flow", "tag", "priority", "app-layer-protocol", "noalert", "target", "flags", "ipopts", "ip_proto", "geoip", "fragbits", "fragoffset", "tos", "window", "threshold", "detection_filter", "dce_iface", "dce_opnum", "dce_stub_data", "asn1"}): nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no valid value for %s tag", key.value) } if r.Tags == nil { r.Tags = make(map[string]string) } r.Tags[key.value] = nextItem.value case inSlice(key.value, []string{"sameip", "tls.store", "ftpbounce"}): r.Statements = append(r.Statements, key.value) case inSlice(key.value, tlsTags): t := &TLSTag{ Key: key.value, } nextItem := l.nextItem() if nextItem.typ == itemNot { t.Negate = true nextItem = l.nextItem() } t.Value = nextItem.value r.TLSTags = append(r.TLSTags, t) case key.value == "stream_size": nextItem := l.nextItem() parts := strings.Split(nextItem.value, ",") if len(parts) != 3 { return fmt.Errorf("invalid number of parts for stream_size: %d", len(parts)) } num, err := strconv.Atoi(strings.TrimSpace(parts[2])) if err != nil { return fmt.Errorf("comparison number is not an integer: %v", parts[2]) } r.StreamMatch = &StreamCmp{ Direction: parts[0], Operator: parts[1], Number: num, } case key.value == "reference": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for reference") } refs := strings.SplitN(nextItem.value, ",", 2) if len(refs) != 2 { return fmt.Errorf("invalid reference definition: %s", refs) } r.References = append(r.References, &Reference{Type: refs[0], Value: refs[1]}) case key.value == "metadata": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for metadata") } metas := metaSplitRE.Split(nextItem.value, -1) for _, kv := range metas { metaTmp := strings.SplitN(kv, " ", 2) if len(metaTmp) != 2 { return fmt.Errorf("invalid metadata definition: %s", metaTmp) } r.Metas = append(r.Metas, &Metadata{Key: strings.TrimSpace(metaTmp[0]), Value: strings.TrimSpace(metaTmp[1])}) } case key.value == "sid": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option sid") } sid, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid sid %s", nextItem.value) } r.SID = sid case key.value == "rev": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option rev") } rev, err := strconv.Atoi(nextItem.value) if err != nil { 
return fmt.Errorf("invalid rev %s", nextItem.value) } r.Revision = rev case key.value == "msg": nextItem := l.nextItem() if nextItem.typ != itemOptionValueString { return errors.New("no value for option msg") } r.Description = nextItem.value case isStickyBuffer(key.value): var d DataPos var err error if d, err = StickyBuffer(key.value); err != nil { return err } dataPosition = d case inSlice(key.value, []string{"content", "uricontent"}): nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { c, err := parseContent(nextItem.value) if err != nil { return err } var options []*ContentOption if key.value == "uricontent" { options = append(options, &ContentOption{Name: "http_uri"}) } con := &Content{ DataPosition: dataPosition, Pattern: c, Negate: negate, Options: options, } r.Matchers = append(r.Matchers, con) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, []string{"http_cookie", "http_raw_cookie", "http_method", "http_header", "http_raw_header", "http_uri", "http_raw_uri", "http_user_agent", "http_stat_code", "http_stat_msg", "http_client_body", "http_server_body", "http_host", "nocase", "rawbytes", "startswith", "endswith"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value}) case inSlice(key.value, []string{"depth", "distance", "offset", "within"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no value for content option %s", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value, Value: nextItem.value}) case key.value == "fast_pattern": lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } var ( only bool offset int length int ) nextItem := l.nextItem() if nextItem.typ == itemOptionValue { v := nextItem.value switch { case v == "only":
only = true case strings.Contains(v, ","): s := strings.Split(v, ",") i, err := strconv.Atoi(s[0]) if err != nil { return fmt.Errorf("fast_pattern offset is not an int: %s; %s", s[0], err) } offset = i i, err = strconv.Atoi(s[1]) if err != nil { return fmt.Errorf("fast_pattern length is not an int: %s; %s", s[1], err) } length = i } } lastContent.FastPattern = FastPattern{true, only, offset, length} case key.value == "pcre": nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { p, err := parsePCRE(unquote(nextItem.value)) if err != nil { return err } p.DataPosition = dataPosition p.Negate = negate r.Matchers = append(r.Matchers, p) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, allbyteMatchTypeNames()): k, err := byteMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a supported byteMatchType keyword", key.value) } // Handle negation logic here, don't want to pass lexer to parseByteMatch. nextItem := l.nextItem() var negate bool if k == isDataAt && nextItem.typ == itemNot { negate = true nextItem = l.nextItem() } var b *ByteMatch // Parse base64_decode differently as it has odd semantics. if k == b64Decode { b, err = parseBase64Decode(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse base64Decode: %v", err) } // base64_decode allows NumBytes to be empty, an int or a variable. if i, err := strconv.Atoi(b.NumBytes); err != nil && b.NumBytes != "" { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } else if i < 1 { return fmt.Errorf("bytes must be positive, non-zero values only: %d", i) } } } else { b, err = parseByteMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse byteMatch: %v", err) } if _, err := strconv.Atoi(b.NumBytes); err != nil { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } } } b.Negate = negate r.Matchers = append(r.Matchers, b) case inSlice(key.value, allLenMatchTypeNames()): k, err := lenMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a support lenMatch keyword", key.value) } nextItem := l.nextItem() m, err := parseLenMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse LenMatch: %v", err) } m.DataPosition = dataPosition r.Matchers = append(r.Matchers, m) case key.value == "flowbits": nextItem := l.nextItem() fb, err := parseFlowbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowbit: %v", err) } r.Flowbits = append(r.Flowbits, fb) case key.value == "xbits": nextItem := l.nextItem() xb, err := parseXbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing xbits: %v", err) } r.Xbits = append(r.Xbits, xb) case key.value == "flowint": nextItem := l.nextItem() fi, err := parseFlowint(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowint: %v", err) } r.Flowints = append(r.Flowints, fi) default: return &UnsupportedOptionError{ Options: []string{key.value}, } } return nil } // UnsupportedOptionError contains a partially parsed rule, and the options that aren't // supported for parsing. 
type UnsupportedOptionError struct { Rule *Rule Options []string } // Error returns a string for UnsupportedOptionError func (uoe *UnsupportedOptionError) Error() string { return fmt.Sprintf("rule contains unsupported option(s): %s", strings.Join(uoe.Options, ",")) } // parseRuleAux parses an IDS rule, optionally ignoring comments. func parseRuleAux(rule string, commented bool) (*Rule, error) { l, err := lex(rule) if err != nil { return nil, err } defer l.close() dataPosition = pktData r := &Rule{} var unsupportedOptions = make([]string, 0, 3) for item := l.nextItem(); item.typ != itemEOR && item.typ != itemEOF && err == nil; item = l.nextItem() { switch item.typ { case itemComment: if r.Action != "" || commented { // Ignore comment ending rule. return r, nil } err = r.comment(item, l) // Error here means that the comment was not a commented rule. // So we're not parsing a rule and we need to break out. if err != nil { break } // This line was a commented rule. return r, nil case itemAction: err = r.action(item, l) case itemProtocol: err = r.protocol(item, l) case itemSourceAddress, itemDestinationAddress, itemSourcePort, itemDestinationPort: err = r.network(item, l) case itemDirection: err = r.direction(item, l) case itemOptionKey: err = r.option(item, l) // We will continue to parse a rule with unsupported options. if uerr, ok := err.(*UnsupportedOptionError); ok { unsupportedOptions = append(unsupportedOptions, uerr.Options...) // This is ugly but allows the parsing to continue. err = nil } case itemError: err = errors.New(item.value) } // Unrecoverable parse error. if err != nil { return nil, err } } // If we encountered one or more unsupported keys, return an UnsupportedOptionError. if len(unsupportedOptions) > 0 { return nil, &UnsupportedOptionError{ Rule: r, Options: unsupportedOptions, } } return r, nil } // ParseRule parses an IDS rule and returns a struct describing the rule. func ParseRule(rule string) (*Rule, error) { return parseRuleAux(rule, false) }
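// A further usage sketch under the same assumptions (illustrative rule string,
// assumed import path); it shows flowbits options landing in the rule's Flowbits
// slice with the Action and Value fields parsed above.
package main

import (
	"fmt"
	"log"

	"github.com/google/gonids"
)

func main() {
	raw := `alert tcp any any -> any any (msg:"flowbits example"; ` +
		`flowbits:set,exfil.detected; flowbits:noalert; sid:3; rev:1;)`

	r, err := gonids.ParseRule(raw)
	if err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	for _, fb := range r.Flowbits {
		fmt.Printf("flowbit action=%q value=%q\n", fb.Action, fb.Value)
	}
}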
random_line_split
parser.go
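// An in-package sketch of the unexported parsePCRE helper defined above; it
// shows a pcre value being split into the pattern between the first and last
// slash plus the trailing option flags. Same in-package assumption as earlier.
package gonids

import "fmt"

func pcreSketch() {
	p, err := parsePCRE(`/evil\.com/i`)
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Printf("pattern=%s options=%s\n", p.Pattern, p.Options)
	// expected to print: pattern=evil\.com options=i
}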
/* Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package gonids implements a basic parser of IDS rules. // // For now the parser is very basic and it only parses a subset of fields. // We intentionally omit http_encode as it doesn't seem to be used in practice. package gonids import ( "encoding/hex" "errors" "fmt" "net" "regexp" "strconv" "strings" ) // hexRE matches on hexadecimal content like |41 41 41| for example. var hexRE = regexp.MustCompile(`(?i)(\|(?:\s*[a-f0-9]{2}\s*)+\|)`) // escapeRE matches char that needs to escaped in regexp. var escapeRE = regexp.MustCompile(`([()+.'\\])`) // escapeContent matches escaped special characters. var escapeContent = regexp.MustCompile(`\\([\\;":])`) // metaSplitRE matches string in metadata. var metaSplitRE = regexp.MustCompile(`,\s*`) // nestedNetRE matches nested network groups. var nestedNetRE = regexp.MustCompile(`,(!?\[[^]]*\])`) // portSplitRE splits port lists and ranges for validation. var portSplitRE = regexp.MustCompile(`[:,]`) var appLayerProtocols = []string{ "dcerpc", "dhcp", "dnp3", "dns", "enip", "ftp", "ftp-data", "http", "http2", "icmp", "icmpv4", "icmpv6", "ikev2", "imap", "ip", "ip4", "ip6", "ipv4", "ipv6", "irc", "jabber", "krb5", "modbus", "mqtt", "nfs", "ntp", "pkthdr", "rdp", "rfb", "sctp", "sip", "smb", "smtp", "snmp", "ssh", "tcp", "tcp-pkt", "tcp-stream", "tftp", "tls", "udp", } // parseContent decodes rule content match. For now it only takes care of escaped and hex // encoded content. func parseContent(content string) ([]byte, error) { // Decode and replace all occurrences of hexadecimal content. var errpanic error defer func() { r := recover() if r != nil { errpanic = fmt.Errorf("recovered from panic: %v", r) } }() if containsUnescaped(content) { return nil, fmt.Errorf("invalid special characters escaping") } b := escapeContent.ReplaceAllString(content, "$1") b = hexRE.ReplaceAllStringFunc(b, func(h string) string { r, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1)) if err != nil { panic("invalid hexRE regexp") } return string(r) }) return []byte(b), errpanic } // parsePCRE parses the components of a PCRE. Returns PCRE struct. func parsePCRE(s string) (*PCRE, error) { c := strings.Count(s, "/") if c < 2 { return nil, fmt.Errorf("all pcre patterns must contain at least 2 '/', found: %d", c) } l := strings.LastIndex(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find options in PCRE") } i := strings.Index(s, "/") if l < 0 { return nil, fmt.Errorf("couldn't find start of pattern") } return &PCRE{ Pattern: []byte(s[i+1 : l]), Options: []byte(s[l+1:]), }, nil } // parseLenMatch parses a LenMatch (like urilen). func parseLenMatch(k lenMatchType, s string) (*LenMatch, error) { m := new(LenMatch) m.Kind = k switch { // Simple case, no operators. case !strings.ContainsAny(s, "><"): // Ignore options after ','. 
numTmp := strings.Split(s, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Leading operator, single number. case strings.HasPrefix(s, ">") || strings.HasPrefix(s, "<"): m.Operator = s[0:1] // Strip leading < or >. numTmp := strings.TrimLeft(s, "><") // Ignore options after ','. numTmp = strings.Split(numTmp, ",")[0] num, err := strconv.Atoi(strings.TrimSpace(numTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", s) } m.Num = num // Min/Max center operator. case strings.Contains(s, "<>"): m.Operator = "<>" parts := strings.Split(s, "<>") if len(parts) != 2 { return nil, fmt.Errorf("must have exactly 2 parts for min/max operator. got %d", len(parts)) } var min, max int var err error min, err = strconv.Atoi(strings.TrimSpace(parts[0])) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(parts[0])) } maxTmp := strings.Split(parts[1], ",")[0] max, err = strconv.Atoi(strings.TrimSpace(maxTmp)) if err != nil { return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(maxTmp)) } // Do stuff to handle options here. m.Min = min m.Max = max } // Parse options: if strings.Contains(s, ",") { opts := strings.Split(s, ",")[1:] for i, o := range opts { opts[i] = strings.TrimSpace(o) } m.Options = opts } return m, nil } func parseBase64Decode(k byteMatchType, s string) (*ByteMatch, error) { if k != b64Decode { return nil, fmt.Errorf("kind %v is not base64_decode", k) } b := new(ByteMatch) b.Kind = k // All options to base64_decode are optional, and specified by their keyword. for _, p := range strings.Split(s, ",") { v := strings.TrimSpace(p) switch { case strings.HasPrefix(v, "bytes"): b.NumBytes = strings.TrimSpace(strings.SplitAfter(v, "bytes")[1]) case strings.HasPrefix(v, "offset"): val := strings.TrimSpace(strings.SplitAfter(v, "offset")[1]) i, err := strconv.Atoi(val) if err != nil { return nil, fmt.Errorf("offset is not an int: %s; %s", val, err) } if i < 1 { return nil, fmt.Errorf("offset must be positive, non-zero values only") } b.Offset = i case strings.HasPrefix(v, "relative"): b.Options = []string{"relative"} } } return b, nil } // parseByteMatch parses a ByteMatch. func parseByteMatch(k byteMatchType, s string) (*ByteMatch, error) { b := new(ByteMatch) b.Kind = k parts := strings.Split(s, ",") // Num bytes is required for all byteMatchType keywords. if len(parts) < 1 { return nil, fmt.Errorf("%s keyword has %d parts", s, len(parts)) } b.NumBytes = strings.TrimSpace(parts[0]) if len(parts) < b.Kind.minLen() { return nil, fmt.Errorf("invalid %s length: %d", b.Kind, len(parts)) } if k == bExtract || k == bJump { // Parse offset. offset, err := strconv.Atoi(strings.TrimSpace(parts[1])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } if k == bExtract { // Parse variable name. name := parts[2] b.Variable = name } if k == bTest { // Parse operator. b.Operator = strings.TrimSpace(parts[1]) // Parse value. Can use a variable. b.Value = strings.TrimSpace(parts[2]) // Parse offset. 
offset, err := strconv.Atoi(strings.TrimSpace(parts[3])) if err != nil { return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) } b.Offset = offset } // The rest of the options, for all types not b64decode for i, l := b.Kind.minLen(), len(parts); i < l; i++ { parts[i] = strings.TrimSpace(parts[i]) b.Options = append(b.Options, parts[i]) } return b, nil } // parseFlowbit parses a flowbit. func parseFlowbit(s string) (*Flowbit, error) { parts := strings.Split(s, ",") if len(parts) < 1 { return nil, fmt.Errorf("couldn't parse flowbit string: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) { return nil, fmt.Errorf("invalid action for flowbit: %s", a) } fb := &Flowbit{ Action: a, } if fb.Action == "noalert" && len(parts) > 1 { return nil, fmt.Errorf("noalert shouldn't have a value") } if len(parts) == 2 { fb.Value = strings.TrimSpace(parts[1]) } return fb, nil } // parseXbit parses an xbit. func parseXbit(s string) (*Xbit, error) { parts := strings.Split(s, ",") // All xbits must have an action, name and track if len(parts) < 3 { return nil, fmt.Errorf("not enough parts for xbits: %s", s) } // Ensure all actions are of valid type. a := strings.TrimSpace(parts[0]) if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) { return nil, fmt.Errorf("invalid action for xbits: %s", a) } xb := &Xbit{ Action: a, Name: strings.TrimSpace(parts[1]), } // Track. t := strings.Fields(parts[2]) if len(t) != 2 { return nil, fmt.Errorf("wrong number of parts for track: %v", t) } if t[0] != "track" { return nil, fmt.Errorf("%s should be 'track'", t[0]) } xb.Track = t[1] // Expire if len(parts) == 4 { e := strings.Fields(parts[3]) if len(e) != 2 { return nil, fmt.Errorf("wrong number of parts for expire: %v", e) } if e[0] != "expire" { return nil, fmt.Errorf("%s should be 'expire'", e[0]) } xb.Expire = e[1] } return xb, nil } // parseFlowint parses a flowint. func parseFlowint(s string) (*Flowint, error) { parts := strings.Split(s, ",") // All flowints must have a name and modifier if len(parts) < 2 { return nil, fmt.Errorf("not enough parts for flowint: %s", s) } // Ensure all actions are of valid type. m := strings.TrimSpace(parts[1]) if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) { return nil, fmt.Errorf("invalid modifier for flowint: %s", m) } fi := &Flowint{ Name: strings.TrimSpace(parts[0]), Modifier: m, } if len(parts) == 3 { fi.Value = strings.TrimSpace(parts[2]) } return fi, nil } // containsUnescaped checks content whether special characters are properly escaped. func containsUnescaped(s string) bool { esc := false for _, b := range s { if esc { switch b { case '\\', ';', '"', ':': esc = false default: return true } } else { switch b { case '\\': esc = true case ';', '"': return true } } } return esc } func unquote(s string) string { if strings.IndexByte(s, '"') < 0 { return s } return strings.Replace(s, `\"`, `"`, -1) } func inSlice(str string, strings []string) bool { for _, k := range strings { if str == k { return true } } return false } // comment decodes a comment (commented rule, or just a comment.) func (r *Rule) comment(key item, l *lexer) error { if key.typ != itemComment { panic("item is not a comment") } if r.Disabled { // ignoring comment for rule with empty action return nil } rule, err := parseRuleAux(key.value, true) // If there was an error this means the comment is not a rule. 
if err != nil { return fmt.Errorf("this is not a rule: %s", err) } // We parsed a rule, this was a comment so set the rule to disabled. rule.Disabled = true // Overwrite the rule we're working on with the recently parsed, disabled rule. *r = *rule return nil } // action decodes an IDS rule option based on its key. func (r *Rule) action(key item, l *lexer) error { if key.typ != itemAction { panic("item is not an action") } if !inSlice(key.value, []string{"alert", "drop", "pass"}) { return fmt.Errorf("invalid action: %v", key.value) } r.Action = key.value return nil } // protocol decodes an IDS rule protocol based on its key. func (r *Rule) protocol(key item, l *lexer) error { if key.typ != itemProtocol { panic("item is not a protocol") } if !inSlice(key.value, appLayerProtocols) { return fmt.Errorf("invalid protocol: %v", key.value) } r.Protocol = key.value return nil } // network decodes an IDS rule network (networks and ports) based on its key. func (r *Rule) network(key item, l *lexer) error { // This is a hack. We use a regexp to replace the outer `,` with `___` // to give us a discrete string to split on, avoiding the inner `,`. // Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` are trimmed. tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]") items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___") // Validate that no items contain spaces. for _, i := range items { if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) { return fmt.Errorf("network component contains spaces: %v", i) } } switch key.typ { case itemSourceAddress: if validNetworks(items) { r.Source.Nets = append(r.Source.Nets, items...) } else { return fmt.Errorf("some or all source ips are invalid: %v", items) } case itemSourcePort: if portsValid(items) { r.Source.Ports = append(r.Source.Ports, items...) } else { return fmt.Errorf("some or all source ports are invalid: %v", items) } case itemDestinationAddress: if validNetworks(items) { r.Destination.Nets = append(r.Destination.Nets, items...) } else { return fmt.Errorf("some or all destination ips are invalid: %v", items) } case itemDestinationPort: if portsValid(items) { r.Destination.Ports = append(r.Destination.Ports, items...) } else { return fmt.Errorf("some or all destination ports are invalid: %v", items) } default: panic("item is not a network component") } return nil } // Validate that every item is between 1 and 65535. func portsValid(p []string) bool { for _, u := range p { if strings.Count(u, "[") != strings.Count(u, "]") { // unbalanced groups. return false } u = strings.TrimPrefix(u, "!") // If this port range is a grouping, check the inner group. if strings.HasPrefix(u, "[") { if portsValid(strings.Split(strings.Trim(u, "[]"), ","))
return false } ports := portSplitRE.Split(u, -1) for _, port := range ports { port = strings.TrimPrefix(port, "!") if port == "any" || port == "" || strings.HasPrefix(port, "$") { continue } x, err := strconv.Atoi(port) if err != nil { return false } if x > 65535 || x < 0 { return false } } } return true } // Validate item is either a valid ip or ip range. func validNetwork(i string) bool { _, _, err := net.ParseCIDR(i) if err == nil { return true } if net.ParseIP(i) != nil { return true } return false } // Validate every item is either a valid ip or ip range. func validNetworks(nets []string) bool { for _, net := range nets { if strings.Count(net, "[") != strings.Count(net, "]") { // unbalanced groups. return false } net = strings.TrimPrefix(net, "!") // If this network is a grouping, check the inner group. if strings.HasPrefix(net, "[") || strings.Contains(net, ",") { if validNetworks(strings.Split(strings.Trim(net, "[]"), ",")) { continue } return false } switch { case net == "any": continue case strings.HasPrefix(net, "$"): continue case !validNetwork(net): return false } } return true } // direction decodes an IDS rule direction based on its key. func (r *Rule) direction(key item, l *lexer) error { if key.typ != itemDirection { panic("item is not a direction") } switch key.value { case "->": r.Bidirectional = false case "<>": r.Bidirectional = true default: return fmt.Errorf("invalid direction operator %q", key.value) } return nil } var dataPosition = pktData // option decodes an IDS rule option based on its key. func (r *Rule) option(key item, l *lexer) error { if key.typ != itemOptionKey { panic("item is not an option key") } switch { // TODO: Many of these simple tags could be factored into nicer structures. case inSlice(key.value, []string{"classtype", "flow", "tag", "priority", "app-layer-protocol", "noalert", "target", "flags", "ipopts", "ip_proto", "geoip", "fragbits", "fragoffset", "tos", "window", "threshold", "detection_filter", "dce_iface", "dce_opnum", "dce_stub_data", "asn1"}): nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no valid value for %s tag", key.value) } if r.Tags == nil { r.Tags = make(map[string]string) } r.Tags[key.value] = nextItem.value case inSlice(key.value, []string{"sameip", "tls.store", "ftpbounce"}): r.Statements = append(r.Statements, key.value) case inSlice(key.value, tlsTags): t := &TLSTag{ Key: key.value, } nextItem := l.nextItem() if nextItem.typ == itemNot { t.Negate = true nextItem = l.nextItem() } t.Value = nextItem.value r.TLSTags = append(r.TLSTags, t) case key.value == "stream_size": nextItem := l.nextItem() parts := strings.Split(nextItem.value, ",") if len(parts) != 3 { return fmt.Errorf("invalid number of parts for stream_size: %d", len(parts)) } num, err := strconv.Atoi(strings.TrimSpace(parts[2])) if err != nil { return fmt.Errorf("comparison number is not an integer: %v", parts[2]) } r.StreamMatch = &StreamCmp{ Direction: parts[0], Operator: parts[1], Number: num, } case key.value == "reference": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for reference") } refs := strings.SplitN(nextItem.value, ",", 2) if len(refs) != 2 { return fmt.Errorf("invalid reference definition: %s", refs) } r.References = append(r.References, &Reference{Type: refs[0], Value: refs[1]}) case key.value == "metadata": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no valid value for metadata") } metas := metaSplitRE.Split(nextItem.value, -1) 
for _, kv := range metas { metaTmp := strings.SplitN(kv, " ", 2) if len(metaTmp) != 2 { return fmt.Errorf("invalid metadata definition: %s", metaTmp) } r.Metas = append(r.Metas, &Metadata{Key: strings.TrimSpace(metaTmp[0]), Value: strings.TrimSpace(metaTmp[1])}) } case key.value == "sid": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option sid") } sid, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid sid %s", nextItem.value) } r.SID = sid case key.value == "rev": nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return errors.New("no value for option rev") } rev, err := strconv.Atoi(nextItem.value) if err != nil { return fmt.Errorf("invalid rev %s", nextItem.value) } r.Revision = rev case key.value == "msg": nextItem := l.nextItem() if nextItem.typ != itemOptionValueString { return errors.New("no value for option msg") } r.Description = nextItem.value case isStickyBuffer(key.value): var d DataPos var err error if d, err = StickyBuffer(key.value); err != nil { return err } dataPosition = d case inSlice(key.value, []string{"content", "uricontent"}): nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == itemOptionValueString { c, err := parseContent(nextItem.value) if err != nil { return err } var options []*ContentOption if key.value == "uricontent" { options = append(options, &ContentOption{Name: "http_uri"}) } con := &Content{ DataPosition: dataPosition, Pattern: c, Negate: negate, Options: options, } r.Matchers = append(r.Matchers, con) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, []string{"http_cookie", "http_raw_cookie", "http_method", "http_header", "http_raw_header", "http_uri", "http_raw_uri", "http_user_agent", "http_stat_code", "http_stat_msg", "http_client_body", "http_server_body", "http_host", "nocase", "rawbytes", "startswith", "endswith"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value}) case inSlice(key.value, []string{"depth", "distance", "offset", "within"}): lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } nextItem := l.nextItem() if nextItem.typ != itemOptionValue { return fmt.Errorf("no value for content option %s", key.value) } lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value, Value: nextItem.value}) case key.value == "fast_pattern": lastContent := r.LastContent() if lastContent == nil { return fmt.Errorf("invalid content option %q with no content match", key.value) } var ( only bool offset int length int ) nextItem := l.nextItem() if nextItem.typ == itemOptionValue { v := nextItem.value switch { case v == "only": only = true case strings.Contains(v, ","): s := strings.Split(v, ",") i, err := strconv.Atoi(s[0]) if err != nil { return fmt.Errorf("fast_pattern offset is not an int: %s; %s", s[0], err) } offset = i i, err = strconv.Atoi(s[1]) if err != nil { return fmt.Errorf("fast_pattern length is not an int: %s; %s", s[1], err) } length = i } } lastContent.FastPattern = FastPattern{true, only, offset, length} case key.value == "pcre": nextItem := l.nextItem() negate := false if nextItem.typ == itemNot { nextItem = l.nextItem() negate = true } if nextItem.typ == 
itemOptionValueString { p, err := parsePCRE(unquote(nextItem.value)) if err != nil { return err } p.DataPosition = dataPosition p.Negate = negate r.Matchers = append(r.Matchers, p) } else { return fmt.Errorf("invalid type %q for option content", nextItem.typ) } case inSlice(key.value, allbyteMatchTypeNames()): k, err := byteMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a supported byteMatchType keyword", key.value) } // Handle negation logic here, don't want to pass lexer to parseByteMatch. nextItem := l.nextItem() var negate bool if k == isDataAt && nextItem.typ == itemNot { negate = true nextItem = l.nextItem() } var b *ByteMatch // Parse base64_decode differently as it has odd semantics. if k == b64Decode { b, err = parseBase64Decode(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse base64Decode: %v", err) } // base64_decode allows NumBytes to be empty, an int or a variable. if i, err := strconv.Atoi(b.NumBytes); err != nil && b.NumBytes != "" { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } else if i < 1 { return fmt.Errorf("bytes must be positive, non-zero values only: %d", i) } } } else { b, err = parseByteMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse byteMatch: %v", err) } if _, err := strconv.Atoi(b.NumBytes); err != nil { // NumBytes is not an int, check if it is a variable from byte_extract. if !r.HasVar(b.NumBytes) { return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) } } } b.Negate = negate r.Matchers = append(r.Matchers, b) case inSlice(key.value, allLenMatchTypeNames()): k, err := lenMatcher(key.value) if err != nil { return fmt.Errorf("%s is not a support lenMatch keyword", key.value) } nextItem := l.nextItem() m, err := parseLenMatch(k, nextItem.value) if err != nil { return fmt.Errorf("could not parse LenMatch: %v", err) } m.DataPosition = dataPosition r.Matchers = append(r.Matchers, m) case key.value == "flowbits": nextItem := l.nextItem() fb, err := parseFlowbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowbit: %v", err) } r.Flowbits = append(r.Flowbits, fb) case key.value == "xbits": nextItem := l.nextItem() xb, err := parseXbit(nextItem.value) if err != nil { return fmt.Errorf("error parsing xbits: %v", err) } r.Xbits = append(r.Xbits, xb) case key.value == "flowint": nextItem := l.nextItem() fi, err := parseFlowint(nextItem.value) if err != nil { return fmt.Errorf("error parsing flowint: %v", err) } r.Flowints = append(r.Flowints, fi) default: return &UnsupportedOptionError{ Options: []string{key.value}, } } return nil } // UnsupportedOptionError contains a partially parsed rule, and the options that aren't // supported for parsing. type UnsupportedOptionError struct { Rule *Rule Options []string } // Error returns a string for UnsupportedOptionError func (uoe *UnsupportedOptionError) Error() string { return fmt.Sprintf("rule contains unsupported option(s): %s", strings.Join(uoe.Options, ",")) } // parseRuleAux parses an IDS rule, optionally ignoring comments. 
func parseRuleAux(rule string, commented bool) (*Rule, error) { l, err := lex(rule) if err != nil { return nil, err } defer l.close() dataPosition = pktData r := &Rule{} var unsupportedOptions = make([]string, 0, 3) for item := l.nextItem(); item.typ != itemEOR && item.typ != itemEOF && err == nil; item = l.nextItem() { switch item.typ { case itemComment: if r.Action != "" || commented { // Ignore comment ending rule. return r, nil } err = r.comment(item, l) // Error here means that the comment was not a commented rule. // So we're not parsing a rule and we need to break out. if err != nil { break } // This line was a commented rule. return r, nil case itemAction: err = r.action(item, l) case itemProtocol: err = r.protocol(item, l) case itemSourceAddress, itemDestinationAddress, itemSourcePort, itemDestinationPort: err = r.network(item, l) case itemDirection: err = r.direction(item, l) case itemOptionKey: err = r.option(item, l) // We will continue to parse a rule with unsupported options. if uerr, ok := err.(*UnsupportedOptionError); ok { unsupportedOptions = append(unsupportedOptions, uerr.Options...) // This is ugly but allows the parsing to continue. err = nil } case itemError: err = errors.New(item.value) } // Unrecoverable parse error. if err != nil { return nil, err } } // If we encountered one or more unsupported keys, return an UnsupportedOptionError. if len(unsupportedOptions) > 0 { return nil, &UnsupportedOptionError{ Rule: r, Options: unsupportedOptions, } } return r, nil } // ParseRule parses an IDS rule and returns a struct describing the rule. func ParseRule(rule string) (*Rule, error) { return parseRuleAux(rule, false) }
{ continue }
conditional_block
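parseContent in the record above folds |41 41|-style pipe-delimited hex runs into raw bytes before a content match is stored. The following standalone sketch reproduces just that transformation so it can be run in isolation; it mirrors the hexRE regexp from the record but is not the package's exported API.

package main

import (
    "encoding/hex"
    "fmt"
    "regexp"
    "strings"
)

// hexPipe matches pipe-delimited hex runs such as |20 2F|, like hexRE above.
var hexPipe = regexp.MustCompile(`(?i)(\|(?:\s*[a-f0-9]{2}\s*)+\|)`)

// decodeHexRuns replaces each hex run with the bytes it encodes.
func decodeHexRuns(s string) string {
    return hexPipe.ReplaceAllStringFunc(s, func(h string) string {
        raw, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1))
        if err != nil {
            return h // leave the run untouched if it is not valid hex
        }
        return string(raw)
    })
}

func main() {
    fmt.Printf("%q\n", decodeHexRuns(`GET|20 2F|index.html`)) // "GET /index.html"
}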
types.go
package luno import "github.com/luno/luno-go/decimal" type AccountBalance struct { // ID of the account. AccountId string `json:"account_id"` // Currency code for the asset held in this account. Asset string `json:"asset"` // The amount available to send or trade. Balance decimal.Decimal `json:"balance"` // The name set by the user upon creating the account. Name string `json:"name"` // Amount locked by Luno and cannot be sent or traded. This could be due to // open orders. Reserved decimal.Decimal `json:"reserved"` // Amount that is awaiting some sort of verification to be credited to this // account. This could be an on-chain transaction that Luno is waiting for // further block verifications to happen. Unconfirmed decimal.Decimal `json:"unconfirmed"` } type AddressMeta struct { Label string `json:"label"` Value string `json:"value"` } type CryptoDetails struct { Address string `json:"address"` Txid string `json:"txid"` } type DetailFields struct { CryptoDetails CryptoDetails `json:"crypto_details"` TradeDetails TradeDetails `json:"trade_details"` } type Kind string const ( KindExchange Kind = "EXCHANGE" KindFee Kind = "FEE" KindInterest Kind = "INTEREST" KindTransfer Kind = "TRANSFER" ) type MarketInfo struct { // Base currency code BaseCurrency string `json:"base_currency"` // Counter currency code CounterCurrency string `json:"counter_currency"` // Fee decimal places FeeScale int64 `json:"fee_scale"` // Unique identifier for the market MarketId string `json:"market_id"` // Maximum order price MaxPrice decimal.Decimal `json:"max_price"` // Maximum order volume MaxVolume decimal.Decimal `json:"max_volume"` // Minimum order price MinPrice decimal.Decimal `json:"min_price"` // Minimum order volume MinVolume decimal.Decimal `json:"min_volume"` // Price decimal places PriceScale int64 `json:"price_scale"` // Current market trading status:<br> // <code>POST_ONLY</code> Trading is indefinitely suspended. This state is // commonly used when new markets are being launched to give traders enough // time to setup their orders before trading begins. When in this status, // orders can only be posted as post-only.<br> // <code>ACTIVE</code> Trading is fully enabled.<br> // <code>SUSPENDED</code> Trading has been temporarily suspended due to very // high volatility. When in this status, orders can only be posted as // post-only.<br> TradingStatus TradingStatus `json:"trading_status"` // Volume decimal places VolumeScale int64 `json:"volume_scale"` } type Order struct { Base decimal.Decimal `json:"base"` CompletedTimestamp Time `json:"completed_timestamp"` Counter decimal.Decimal `json:"counter"` CreationTimestamp Time `json:"creation_timestamp"` ExpirationTimestamp Time `json:"expiration_timestamp"` FeeBase decimal.Decimal `json:"fee_base"` FeeCounter decimal.Decimal `json:"fee_counter"` LimitPrice decimal.Decimal `json:"limit_price"` LimitVolume decimal.Decimal `json:"limit_volume"` OrderId string `json:"order_id"`
// <code>PENDING</code> The order has been placed. Some trades may have // taken place but the order is not filled yet.<br> // <code>COMPLETE</code> The order is no longer active. It has been settled // or has been cancelled. State OrderState `json:"state"` // <code>BID</code> bid (buy) limit order.<br> // <code>ASK</code> ask (sell) limit order. Type OrderType `json:"type"` } type OrderBookEntry struct { // Limit price at which orders are trading at Price decimal.Decimal `json:"price"` // The volume available at the limit price Volume decimal.Decimal `json:"volume"` } type OrderState string const ( OrderStateComplete OrderState = "COMPLETE" OrderStatePending OrderState = "PENDING" ) type OrderType string const ( OrderTypeAsk OrderType = "ASK" OrderTypeBid OrderType = "BID" OrderTypeBuy OrderType = "BUY" OrderTypeSell OrderType = "SELL" ) type OrderV2 struct { // Amount of base filled Base decimal.Decimal `json:"base"` // Time of order completion in milliseconds CompletedTimestamp Time `json:"completed_timestamp"` // Amount of counter filled Counter decimal.Decimal `json:"counter"` // Time of order creation in milliseconds CreationTimestamp Time `json:"creation_timestamp"` // Time of order expiration in milliseconds ExpirationTimestamp Time `json:"expiration_timestamp"` // Base amount of fees to be charged FeeBase decimal.Decimal `json:"fee_base"` // Counter amount of fees to be charged FeeCounter decimal.Decimal `json:"fee_counter"` // Limit price to transact LimitPrice decimal.Decimal `json:"limit_price"` // Limit volume to transact LimitVolume decimal.Decimal `json:"limit_volume"` // The order reference OrderId string `json:"order_id"` // Specifies the market Pair string `json:"pair"` // The order intention Side Side `json:"side"` // The current state of the order // // Status meaning:<br> // <code>AWAITING</code> The order is awaiting to enter the order book.<br> // <code>PENDING</code> The order is in the order book. Some trades may // have taken place but the order is not filled yet.<br> // <code>COMPLETE</code> The order is no longer in the order book. It has // been settled/filled or has been cancelled. 
Status Status `json:"status"` // Direction to trigger the order StopDirection StopDirection `json:"stop_direction"` // Price to trigger the order StopPrice decimal.Decimal `json:"stop_price"` // The order type Type Type `json:"type"` } type Side string const ( SideBuy Side = "BUY" SideSell Side = "SELL" ) type Status string const ( StatusActive Status = "ACTIVE" StatusAwaiting Status = "AWAITING" StatusComplete Status = "COMPLETE" StatusDisabled Status = "DISABLED" StatusPending Status = "PENDING" StatusPostonly Status = "POSTONLY" ) type StopDirection string const ( StopDirectionAbove StopDirection = "ABOVE" StopDirectionBelow StopDirection = "BELOW" StopDirectionRelative_last_trade StopDirection = "RELATIVE_LAST_TRADE" ) type Ticker struct { // The lowest ask price Ask decimal.Decimal `json:"ask"` // The highest bid price Bid decimal.Decimal `json:"bid"` // Last trade price LastTrade decimal.Decimal `json:"last_trade"` Pair string `json:"pair"` // 24h rolling trade volume Rolling24HourVolume decimal.Decimal `json:"rolling_24_hour_volume"` // Market current status // // <code>ACTIVE</code> when the market is trading normally // // <code>POSTONLY</code> when the market has been suspended and only post-only orders will be accepted // // <code>DISABLED</code> when the market is shutdown and no orders can be accepted Status Status `json:"status"` // Unix timestamp in milliseconds of the tick Timestamp Time `json:"timestamp"` } type Trade struct { Base decimal.Decimal `json:"base"` Counter decimal.Decimal `json:"counter"` FeeBase decimal.Decimal `json:"fee_base"` FeeCounter decimal.Decimal `json:"fee_counter"` IsBuy bool `json:"is_buy"` OrderId string `json:"order_id"` Pair string `json:"pair"` Price decimal.Decimal `json:"price"` Sequence int64 `json:"sequence"` Timestamp Time `json:"timestamp"` Type OrderType `json:"type"` Volume decimal.Decimal `json:"volume"` } type TradeDetails struct { // Pair of the market Pair string `json:"pair"` // Price at which the volume traded for Price decimal.Decimal `json:"price"` // Sequence identifies the trade within a market Sequence int64 `json:"sequence"` // Volume is the amount of base traded Volume decimal.Decimal `json:"volume"` } type TradingStatus string const ( TradingStatusPost_only TradingStatus = "POST_ONLY" TradingStatusActive TradingStatus = "ACTIVE" TradingStatusSuspended TradingStatus = "SUSPENDED" ) type Transaction struct { AccountId string `json:"account_id"` Available decimal.Decimal `json:"available"` AvailableDelta decimal.Decimal `json:"available_delta"` Balance decimal.Decimal `json:"balance"` // Transaction amounts computed for convenience. BalanceDelta decimal.Decimal `json:"balance_delta"` Currency string `json:"currency"` // Human-readable description of the transaction. Description string `json:"description"` DetailFields DetailFields `json:"detail_fields"` // Human-readable label-value attributes. Details map[string]string `json:"details"` // The kind of the transaction indicates the transaction flow // // Kinds explained:<br> // <code>FEE</code> when transaction is towards Luno fees<br> // <code>TRANSFER</code> when the transaction is a one way flow of funds, e.g. a deposit or crypto send<br> // <code>EXCHANGE</code> when the transaction is part of a two way exchange, e.g. a trade or instant buy Kind Kind `json:"kind"` RowIndex int64 `json:"row_index"` Timestamp Time `json:"timestamp"` } type Transfer struct { // Amount that has been credited or debited on the account. 
This is always a // positive value regardless of the transfer direction. Amount decimal.Decimal `json:"amount"` // Unix time the transfer was initiated, in milliseconds CreatedAt Time `json:"created_at"` // Fee that has been charged by Luno with regards to this transfer. // This is not included in the `amount`. // For example, if you receive a transaction with the raw amount of 1 BTC // and we charge a `fee` of 0.003 BTC on this transaction you will be // credited the `amount` of 0.997 BTC. Fee decimal.Decimal `json:"fee"` // Transfer unique identifier Id string `json:"id"` // True for credit transfers, false for debits. Inbound bool `json:"inbound"` // When the transfer reflects an on-chain transaction this field will have // the transaction ID. TransactionId string `json:"transaction_id"` } type Type string const ( TypeLimit Type = "LIMIT" TypeMarket Type = "MARKET" TypeStop_limit Type = "STOP_LIMIT" ) type Withdrawal struct { Amount decimal.Decimal `json:"amount"` CreatedAt Time `json:"created_at"` Currency string `json:"currency"` ExternalId string `json:"external_id"` Fee decimal.Decimal `json:"fee"` Id string `json:"id"` Status string `json:"status"` Type string `json:"type"` } type beneficiary struct { BankAccountBranch string `json:"bank_account_branch"` BankAccountNumber string `json:"bank_account_number"` BankAccountType string `json:"bank_account_type"` BankCountry string `json:"bank_country"` BankName string `json:"bank_name"` BankRecipient string `json:"bank_recipient"` CreatedAt Time `json:"created_at"` Id string `json:"id"` } // vi: ft=go
// Specifies the market. Pair string `json:"pair"`
random_line_split
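A short sketch of consuming the transaction Kind constants declared in types.go above. Kind and its values are redeclared locally so the snippet compiles on its own; the descriptions simply restate the doc comments on the Transaction struct.

package main

import "fmt"

// Kind mirrors the type of the same name in types.go.
type Kind string

const (
    KindExchange Kind = "EXCHANGE"
    KindFee      Kind = "FEE"
    KindInterest Kind = "INTEREST"
    KindTransfer Kind = "TRANSFER"
)

// describe maps a transaction kind to the meaning documented on the Transaction struct.
func describe(k Kind) string {
    switch k {
    case KindFee:
        return "fees charged by Luno"
    case KindTransfer:
        return "one-way flow of funds, e.g. a deposit or crypto send"
    case KindExchange:
        return "part of a two-way exchange, e.g. a trade or instant buy"
    case KindInterest:
        return "interest credited to the account"
    default:
        return "unknown kind"
    }
}

func main() {
    fmt.Println(describe(KindTransfer))
}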
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn find_chunk_updates( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 { let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. // // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else
) .abs() .max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2)); ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update)); // We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. 
assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. #[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
{ c }
conditional_block
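get_offset_from_lod_center in the record above biases nonnegative components by +1 so that clipmap coordinates are symmetric about the center (the -3..-1, 1..3 axis in its comment). A tiny sketch of that per-component mapping, written in Go purely for illustration since the original is Rust.

package main

import "fmt"

// bias mirrors the map_components_unary closure: nonnegative components are
// shifted by +1, making voxel coordinates symmetric about the clipmap center.
func bias(c int) int {
    if c >= 0 {
        return c + 1
    }
    return c
}

func main() {
    for _, c := range []int{-3, -2, -1, 0, 1, 2, 3} {
        fmt.Printf("%d -> %d\n", c, bias(c)) // 0 maps to 1, so there is no "0" ring
    }
}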
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn
( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 { let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. 
// // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else { c }) .abs() .max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2)); ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update)); // We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. 
assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. #[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
find_chunk_updates
identifier_name
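octant_chunk_key in the record above scales an octant minimum into chunk-key space with a left shift by chunk_log2 followed by a right shift by the octant's LOD. A small arithmetic sketch with illustrative values (Go is used only for demonstration; the original is Rust and applies the shifts per component of a Point3i).

package main

import "fmt"

// chunkKeyMinimum shows the shift arithmetic for one component: with
// chunkLog2 = 4 (a 16-voxel chunk edge) and lod = 1, an octant minimum
// component of 2 becomes (2 << 4) >> 1 = 16.
func chunkKeyMinimum(octantMin, chunkLog2, lod int) int {
    return (octantMin << chunkLog2) >> lod
}

func main() {
    fmt.Println(chunkKeyMinimum(2, 4, 1)) // 16
}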
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn find_chunk_updates( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 { let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. 
// // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else { c }) .abs() .max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2));
// We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. #[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update));
random_line_split
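all_lod_centers in the clipmap.rs rows derives each coarser LOD's center by arithmetic-shifting the previous center right by one bit per component. A short Go sketch of the same recurrence, assuming a plain [3]int point type and three LODs; lodCenters is an illustrative name, not the crate's API.

package main

import "fmt"

// lodCenters halves the LOD0 center once per level using an arithmetic shift,
// the same recurrence as all_lod_centers: centers[i] = centers[i-1] >> 1.
func lodCenters(lod0 [3]int, numLods int) [][3]int {
	centers := make([][3]int, numLods)
	centers[0] = lod0
	for i := 1; i < numLods; i++ {
		for c := 0; c < 3; c++ {
			centers[i][c] = centers[i-1][c] >> 1 // rounds toward negative infinity
		}
	}
	return centers
}

func main() {
	// For a LOD0 center of (5, -3, 2): LOD1 is (2, -2, 1), LOD2 is (1, -1, 0).
	fmt.Println(lodCenters([3]int{5, -3, 2}, 3))
}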
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn find_chunk_updates( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32
fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2)); ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update)); // We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. 
#[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
{ let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. // // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else { c }) .abs() .max_component() }
identifier_body
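The function body above measures an octant's distance from the clipmap center by biasing nonnegative components by +1 before taking absolute values and the max component, which makes the coordinate system symmetric about the center (there is no column 0). A small Go sketch of that scalar arithmetic; offsetFromCenter is an illustrative name and the example points are made up.

package main

import "fmt"

// offsetFromCenter reproduces the distance measure from get_offset_from_lod_center:
// bias nonnegative components by +1, take absolute values, return the max component.
func offsetFromCenter(lodP, lodCenter [3]int) int {
	max := 0
	for c := 0; c < 3; c++ {
		d := lodP[c] - lodCenter[c]
		if d >= 0 {
			d++ // shift 0,1,2,... to 1,2,3,... so coordinates are symmetric about the center
		}
		if d < 0 {
			d = -d
		}
		if d > max {
			max = d
		}
	}
	return max
}

func main() {
	// A chunk one step to the +x side of the center is as "far" as one two steps to -x:
	fmt.Println(offsetFromCenter([3]int{-2, 0, 0}, [3]int{0, 0, 0})) // 2
	fmt.Println(offsetFromCenter([3]int{1, 0, 0}, [3]int{0, 0, 0}))  // 2
}

With this measure, the offset_from_center > high_lod_boundary test in active_clipmap_lod_chunks selects exactly the octants that fall outside the clip box of the next finer LOD.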
message.go
package services import ( "context" "crypto/md5" "encoding/base64" "encoding/json" "errors" "fmt" "io" "log" "strconv" "strings" "time" "unicode/utf8" bot "github.com/MixinNetwork/bot-api-go-client" number "github.com/MixinNetwork/go-number" "github.com/MixinNetwork/supergroup.mixin.one/config" "github.com/MixinNetwork/supergroup.mixin.one/models" "github.com/MixinNetwork/supergroup.mixin.one/plugin" "github.com/MixinNetwork/supergroup.mixin.one/session" "github.com/fox-one/mixin-sdk" "github.com/gofrs/uuid" ) type TransferView struct { Type string `json:"type"` SnapshotId string `json:"snapshot_id"` CounterUserId string `json:"counter_user_id"` AssetId string `json:"asset_id"` Amount string `json:"amount"` TraceId string `json:"trace_id"` Memo string `json:"memo"` CreatedAt time.Time `json:"created_at"` } type MessageService struct{} type MessageContext struct { user *mixin.User bc chan WsBroadcastMessage recipientID map[string]time.Time } func (mc *MessageContext) OnMessage(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Category == "SYSTEM_ACCOUNT_SNAPSHOT" && msg.UserID != config.AppConfig.Mixin.ClientId { data, err := base64.StdEncoding.DecodeString(msg.Data) if err != nil { return session.BlazeServerError(ctx, err) } var transfer TransferView err = json.Unmarshal(data, &transfer) if err != nil { return session.BlazeServerError(ctx, err) } err = handleTransfer(ctx, mc, transfer, msg.UserID) if err != nil { return session.BlazeServerError(ctx, err) } } else if msg.ConversationID == models.UniqueConversationId(config.AppConfig.Mixin.ClientId, msg.UserID) { if err := handleMessage(ctx, mc, msg, mc.bc); err != nil { return err } } return nil } func (mc *MessageContext) OnAckReceipt(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Status != "READ" { return nil } id, err := models.FindDistributedMessageRecipientId(ctx, msg.MessageID) if err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err) return nil } if id == "" { return nil } if time.Since(mc.recipientID[id]) > models.UserActivePeriod { if err := models.PingUserActiveAt(ctx, id); err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err) } mc.recipientID[id] = time.Now() } return nil } type TransferMemoInst struct { Action string `json:"a"` Param1 string `json:"p1"` Param2 string `json:"p2"` } func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error { go distribute(ctx) go loopPendingMessage(ctx) go handlePendingParticipants(ctx) go handleExpiredPackets(ctx) go schedulePluginCronJob(ctx) user, err := mixin.NewUser( config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, ) if err != nil { panic(err) } mc := &MessageContext{ user: user, bc: broadcastChan, recipientID: map[string]time.Time{}, } for { b := mixin.NewBlazeClient(user) if err := b.Loop(ctx, mc); err != nil { session.Logger(ctx).Error(err) } session.Logger(ctx).Info("connection loop end") time.Sleep(300 * time.Millisecond) } } func
(ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error { id, err := bot.UuidFromString(transfer.TraceId) if err != nil { return nil } user, err := models.FindUser(ctx, userId) if user == nil || err != nil { log.Println("No such a user", userId) return err } if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" { if inst.Action == "rewards" { return handleRewardsPayment(ctx, mc, transfer, user, inst) } else { log.Println("Unknown instruction", inst) } } else { log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err) if user.TraceId == transfer.TraceId { log.Println("New legacy payment", userId, transfer.TraceId) if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId { return user.Payment(ctx) } for _, asset := range config.AppConfig.System.AccpetPaymentAssetList { if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId { return user.Payment(ctx) } } } else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil { log.Println("New order received", userId, transfer.TraceId) return handleOrderPayment(ctx, mc, transfer, order) } else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil { log.Println("New packet paid", userId, transfer.TraceId, id) return err } else if packet.State == models.PacketStatePaid { log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId) return sendAppCard(ctx, mc, packet) } } return nil } func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) { var data *TransferMemoInst err := json.Unmarshal([]byte(transfer.Memo), &data) return data, err } func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error { userId := inst.Param1 targetUser, err := models.FindUser(ctx, userId) if err != nil { log.Println("can't find user to reward", userId, err) return nil } memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10) log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId) var traceID string traceID = transfer.SnapshotId if err != nil { return errors.New("generate trace id failed") } in := &bot.TransferInput{ AssetId: transfer.AssetId, RecipientId: targetUser.UserId, Amount: number.FromString(transfer.Amount), TraceId: traceID, Memo: memo, } if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil { log.Println("can't transfer to recipient", err) return err } if user.UserId != targetUser.UserId { if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil { log.Println("can't record tip", err) // return err } if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil { log.Println("can't create rewards message", err) // return err } } return nil } func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error { if order.PayMethod == models.PayMethodMixin && 
number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) && order.AssetId == transfer.AssetId { _, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId) if err != nil { log.Println(err) return err } } return nil } func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error { description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName) if strings.TrimSpace(packet.User.FullName) == "" { description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc } if count := utf8.RuneCountInString(description); count > 100 { name := string([]rune(packet.User.FullName)[:16]) description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name) } host := config.AppConfig.Service.HTTPResourceHost if config.AppConfig.System.RouterMode == config.RouterModeHash { host = host + config.RouterModeHashSymbol } card, err := json.Marshal(map[string]string{ "icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256", "title": config.AppConfig.MessageTemplate.GroupRedPacket, "description": description, "action": host + "/packets/" + packet.PacketId, }) if err != nil { return session.BlazeServerError(ctx, err) } t := time.Now() u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()} _, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t) if err != nil { return session.BlazeServerError(ctx, err) } return nil } func handleExpiredPackets(ctx context.Context) { var limit = 100 for { packetIds, err := models.ListExpiredPackets(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, id := range packetIds { packet, err := models.SendPacketRefundTransfer(ctx, id) if err != nil { session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err) break } if packet != nil { session.Logger(ctx).Infof("REFUND %v\n", id) } } if len(packetIds) < limit { time.Sleep(300 * time.Millisecond) continue } } } func schedulePluginCronJob(ctx context.Context) { plugin.RunCron() } func handlePendingParticipants(ctx context.Context) { var limit = 100 for { participants, err := models.ListPendingParticipants(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, p := range participants { err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount) if err != nil { session.Logger(ctx).Error(err) break } } if len(participants) < limit { time.Sleep(300 * time.Millisecond) continue } } } func handleMessage(ctx context.Context, mc *MessageContext, message *mixin.MessageView, broadcastChan chan WsBroadcastMessage) error { user, err := models.FindUser(ctx, message.UserID) if err != nil { return err } if user == nil || user.State != models.PaymentStatePaid { return sendHelpMessage(ctx, user, mc, message) } if time.Since(user.ActiveAt) > models.UserActivePeriod { err = models.PingUserActiveAt(ctx, user.UserId) if err != nil { session.Logger(ctx).Error("handleMessage PingUserActiveAt", err) } } if user.SubscribedAt.IsZero() { return sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsUnsubscribe) } dataBytes, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { return session.BadDataError(ctx) } else if len(dataBytes) < 10 { if strings.ToUpper(string(dataBytes)) == 
config.AppConfig.MessageTemplate.MessageCommandsInfo { if count, err := models.SubscribersCount(ctx); err != nil { return err } else { return sendTextMessage(ctx, mc, message.ConversationID, fmt.Sprintf(config.AppConfig.MessageTemplate.MessageCommandsInfoResp, count)) } } } // broadcast if isBroadcastOn, err := models.ReadBroadcastProperty(ctx); err == nil && isBroadcastOn == "on" { go func() { if bmsg, err := decodeMessage(ctx, user, message); err == nil { broadcastChan <- bmsg } }() } if _, err := models.CreateMessage(ctx, user, message.MessageID, message.Category, message.QuoteMessageID, message.Data, message.CreatedAt, message.UpdatedAt); err != nil { return err } return nil } func sendHelpMessage(ctx context.Context, user *models.User, mc *MessageContext, message *mixin.MessageView) error { if err := sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsHelp); err != nil { return err } if err := sendAppButton(ctx, mc, config.AppConfig.MessageTemplate.MessageTipsHelpBtn, message.ConversationID, config.AppConfig.Service.HTTPResourceHost); err != nil { return err } return nil } func decodeMessage(ctx context.Context, user *models.User, message *mixin.MessageView) (WsBroadcastMessage, error) { var bmsg WsBroadcastMessage bmsg.Category = message.Category bmsg.MessageId = message.MessageID bmsg.CreatedAt = message.UpdatedAt bmsg.Data = message.Data bmsg.SpeakerId = user.UserId bmsg.SpeakerName = user.FullName bmsg.SpeakerAvatar = user.AvatarURL if message.Category == "PLAIN_TEXT" { bytes, _ := base64.StdEncoding.DecodeString(message.Data) bmsg.Text = string(bytes) return bmsg, nil } if message.Category != "PLAIN_IMAGE" && message.Category != "PLAIN_VIDEO" && message.Category != "PLAIN_AUDIO" && message.Category != "PLAIN_DATA" { return bmsg, nil } data, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { log.Println("message data decode error", err) return bmsg, err } att, err := attachmentFromMixinJSON(string(data)) if err != nil { log.Println("decode attachment error", err) return bmsg, err } attResp, err := bot.AttachemntShow(ctx, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, att.ID) if err != nil { log.Println("get attachment details error", err) } att.ViewUrl = attResp.ViewURL bmsg.Attachment = att return bmsg, nil } func attachmentFromMixinJSON(jsonString string) (att WsBroadcastMessageAttachment, err error) { var data struct { ID string `json:"attachment_id"` Size int `json:"size"` MimeType string `json:"mime_type"` Name *string `json:"name"` Duration *uint `json:"duration"` Waveform *string `json:"waveform"` Width *uint `json:"width"` Height *uint `json:"height"` Thumbnail *string `json:"thumbnail"` } err = json.Unmarshal([]byte(jsonString), &data) if err != nil { return } att.ID = data.ID att.Size = data.Size att.MimeType = data.MimeType att.Duration = data.Duration if data.Waveform != nil { att.Waveform, err = base64.StdEncoding.DecodeString(*data.Waveform) if err != nil { return } } att.Name = data.Name att.Width = data.Width att.Height = data.Height if data.Thumbnail != nil { att.Thumbnail, err = base64.StdEncoding.DecodeString(*data.Thumbnail) if err != nil { return } } return } func generateRewardTraceID(originTraceID string) (string, error) { h := md5.New() io.WriteString(h, originTraceID) io.WriteString(h, "REWARD") sum := h.Sum(nil) sum[6] = (sum[6] & 0x0f) | 0x30 sum[8] = (sum[8] & 0x3f) | 0x80 id, err := uuid.FromBytes(sum) return id.String(), err }
handleTransfer
identifier_name
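crackTransferProtocol in the message.go rows simply unmarshals the transfer memo into TransferMemoInst, so a rewards payment carries a JSON memo of the form {"a":"rewards","p1":"<recipient user id>","p2":"<note>"}. A standalone Go sketch of building and parsing such a memo; the user id and note values are made up, and only the field tags are taken from message.go.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Same shape and JSON tags as TransferMemoInst in message.go.
type transferMemoInst struct {
	Action string `json:"a"`
	Param1 string `json:"p1"`
	Param2 string `json:"p2"`
}

func main() {
	// Build the memo a client would attach to a rewards transfer (values are made up).
	memo, err := json.Marshal(transferMemoInst{
		Action: "rewards",
		Param1: "ce6f2514-dd62-4dd5-9aad-a4355da0f2cd", // recipient user id
		Param2: "great post!",                          // free-form note shown in the rewards message
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(memo))

	// Parse it back the same way crackTransferProtocol does.
	var inst transferMemoInst
	if err := json.Unmarshal(memo, &inst); err != nil {
		log.Fatal(err)
	}
	fmt.Println(inst.Action, inst.Param1)
}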
message.go
package services import ( "context" "crypto/md5" "encoding/base64" "encoding/json" "errors" "fmt" "io" "log" "strconv" "strings" "time" "unicode/utf8" bot "github.com/MixinNetwork/bot-api-go-client" number "github.com/MixinNetwork/go-number" "github.com/MixinNetwork/supergroup.mixin.one/config" "github.com/MixinNetwork/supergroup.mixin.one/models" "github.com/MixinNetwork/supergroup.mixin.one/plugin" "github.com/MixinNetwork/supergroup.mixin.one/session" "github.com/fox-one/mixin-sdk" "github.com/gofrs/uuid" ) type TransferView struct { Type string `json:"type"` SnapshotId string `json:"snapshot_id"` CounterUserId string `json:"counter_user_id"` AssetId string `json:"asset_id"` Amount string `json:"amount"` TraceId string `json:"trace_id"` Memo string `json:"memo"` CreatedAt time.Time `json:"created_at"` } type MessageService struct{} type MessageContext struct { user *mixin.User bc chan WsBroadcastMessage recipientID map[string]time.Time } func (mc *MessageContext) OnMessage(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Category == "SYSTEM_ACCOUNT_SNAPSHOT" && msg.UserID != config.AppConfig.Mixin.ClientId { data, err := base64.StdEncoding.DecodeString(msg.Data) if err != nil { return session.BlazeServerError(ctx, err) } var transfer TransferView err = json.Unmarshal(data, &transfer) if err != nil { return session.BlazeServerError(ctx, err) } err = handleTransfer(ctx, mc, transfer, msg.UserID) if err != nil { return session.BlazeServerError(ctx, err) } } else if msg.ConversationID == models.UniqueConversationId(config.AppConfig.Mixin.ClientId, msg.UserID) { if err := handleMessage(ctx, mc, msg, mc.bc); err != nil { return err } } return nil } func (mc *MessageContext) OnAckReceipt(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Status != "READ" { return nil } id, err := models.FindDistributedMessageRecipientId(ctx, msg.MessageID) if err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err) return nil } if id == "" { return nil } if time.Since(mc.recipientID[id]) > models.UserActivePeriod { if err := models.PingUserActiveAt(ctx, id); err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err) } mc.recipientID[id] = time.Now() } return nil } type TransferMemoInst struct { Action string `json:"a"` Param1 string `json:"p1"` Param2 string `json:"p2"` } func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error { go distribute(ctx) go loopPendingMessage(ctx) go handlePendingParticipants(ctx) go handleExpiredPackets(ctx) go schedulePluginCronJob(ctx) user, err := mixin.NewUser( config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, ) if err != nil { panic(err) } mc := &MessageContext{ user: user, bc: broadcastChan, recipientID: map[string]time.Time{}, } for { b := mixin.NewBlazeClient(user) if err := b.Loop(ctx, mc); err != nil { session.Logger(ctx).Error(err) } session.Logger(ctx).Info("connection loop end") time.Sleep(300 * time.Millisecond) } } func handleTransfer(ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error { id, err := bot.UuidFromString(transfer.TraceId) if err != nil { return nil } user, err := models.FindUser(ctx, userId) if user == nil || err != nil { log.Println("No such a user", userId) return err } if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" { if inst.Action == "rewards" 
{ return handleRewardsPayment(ctx, mc, transfer, user, inst) } else { log.Println("Unknown instruction", inst) } } else { log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err) if user.TraceId == transfer.TraceId { log.Println("New legacy payment", userId, transfer.TraceId) if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId { return user.Payment(ctx) } for _, asset := range config.AppConfig.System.AccpetPaymentAssetList { if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId { return user.Payment(ctx) } } } else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil { log.Println("New order received", userId, transfer.TraceId) return handleOrderPayment(ctx, mc, transfer, order) } else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil { log.Println("New packet paid", userId, transfer.TraceId, id) return err } else if packet.State == models.PacketStatePaid { log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId) return sendAppCard(ctx, mc, packet) } } return nil } func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) { var data *TransferMemoInst err := json.Unmarshal([]byte(transfer.Memo), &data) return data, err } func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error { userId := inst.Param1 targetUser, err := models.FindUser(ctx, userId) if err != nil { log.Println("can't find user to reward", userId, err) return nil } memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10) log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId) var traceID string traceID = transfer.SnapshotId if err != nil { return errors.New("generate trace id failed") } in := &bot.TransferInput{ AssetId: transfer.AssetId, RecipientId: targetUser.UserId, Amount: number.FromString(transfer.Amount), TraceId: traceID, Memo: memo, } if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil { log.Println("can't transfer to recipient", err) return err } if user.UserId != targetUser.UserId { if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil { log.Println("can't record tip", err) // return err
} if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil { log.Println("can't create rewards message", err) // return err } } return nil } func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error { if order.PayMethod == models.PayMethodMixin && number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) && order.AssetId == transfer.AssetId { _, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId) if err != nil { log.Println(err) return err } } return nil } func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error { description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName) if strings.TrimSpace(packet.User.FullName) == "" { description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc } if count := utf8.RuneCountInString(description); count > 100 { name := string([]rune(packet.User.FullName)[:16]) description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name) } host := config.AppConfig.Service.HTTPResourceHost if config.AppConfig.System.RouterMode == config.RouterModeHash { host = host + config.RouterModeHashSymbol } card, err := json.Marshal(map[string]string{ "icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256", "title": config.AppConfig.MessageTemplate.GroupRedPacket, "description": description, "action": host + "/packets/" + packet.PacketId, }) if err != nil { return session.BlazeServerError(ctx, err) } t := time.Now() u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()} _, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t) if err != nil { return session.BlazeServerError(ctx, err) } return nil } func handleExpiredPackets(ctx context.Context) { var limit = 100 for { packetIds, err := models.ListExpiredPackets(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, id := range packetIds { packet, err := models.SendPacketRefundTransfer(ctx, id) if err != nil { session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err) break } if packet != nil { session.Logger(ctx).Infof("REFUND %v\n", id) } } if len(packetIds) < limit { time.Sleep(300 * time.Millisecond) continue } } } func schedulePluginCronJob(ctx context.Context) { plugin.RunCron() } func handlePendingParticipants(ctx context.Context) { var limit = 100 for { participants, err := models.ListPendingParticipants(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, p := range participants { err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount) if err != nil { session.Logger(ctx).Error(err) break } } if len(participants) < limit { time.Sleep(300 * time.Millisecond) continue } } } func handleMessage(ctx context.Context, mc *MessageContext, message *mixin.MessageView, broadcastChan chan WsBroadcastMessage) error { user, err := models.FindUser(ctx, message.UserID) if err != nil { return err } if user == nil || user.State != models.PaymentStatePaid { return sendHelpMessage(ctx, user, mc, message) } if time.Since(user.ActiveAt) > models.UserActivePeriod { err = models.PingUserActiveAt(ctx, user.UserId) if err != nil { session.Logger(ctx).Error("handleMessage PingUserActiveAt", err) } } 
if user.SubscribedAt.IsZero() { return sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsUnsubscribe) } dataBytes, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { return session.BadDataError(ctx) } else if len(dataBytes) < 10 { if strings.ToUpper(string(dataBytes)) == config.AppConfig.MessageTemplate.MessageCommandsInfo { if count, err := models.SubscribersCount(ctx); err != nil { return err } else { return sendTextMessage(ctx, mc, message.ConversationID, fmt.Sprintf(config.AppConfig.MessageTemplate.MessageCommandsInfoResp, count)) } } } // broadcast if isBroadcastOn, err := models.ReadBroadcastProperty(ctx); err == nil && isBroadcastOn == "on" { go func() { if bmsg, err := decodeMessage(ctx, user, message); err == nil { broadcastChan <- bmsg } }() } if _, err := models.CreateMessage(ctx, user, message.MessageID, message.Category, message.QuoteMessageID, message.Data, message.CreatedAt, message.UpdatedAt); err != nil { return err } return nil } func sendHelpMessage(ctx context.Context, user *models.User, mc *MessageContext, message *mixin.MessageView) error { if err := sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsHelp); err != nil { return err } if err := sendAppButton(ctx, mc, config.AppConfig.MessageTemplate.MessageTipsHelpBtn, message.ConversationID, config.AppConfig.Service.HTTPResourceHost); err != nil { return err } return nil } func decodeMessage(ctx context.Context, user *models.User, message *mixin.MessageView) (WsBroadcastMessage, error) { var bmsg WsBroadcastMessage bmsg.Category = message.Category bmsg.MessageId = message.MessageID bmsg.CreatedAt = message.UpdatedAt bmsg.Data = message.Data bmsg.SpeakerId = user.UserId bmsg.SpeakerName = user.FullName bmsg.SpeakerAvatar = user.AvatarURL if message.Category == "PLAIN_TEXT" { bytes, _ := base64.StdEncoding.DecodeString(message.Data) bmsg.Text = string(bytes) return bmsg, nil } if message.Category != "PLAIN_IMAGE" && message.Category != "PLAIN_VIDEO" && message.Category != "PLAIN_AUDIO" && message.Category != "PLAIN_DATA" { return bmsg, nil } data, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { log.Println("message data decode error", err) return bmsg, err } att, err := attachmentFromMixinJSON(string(data)) if err != nil { log.Println("decode attachment error", err) return bmsg, err } attResp, err := bot.AttachemntShow(ctx, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, att.ID) if err != nil { log.Println("get attachment details error", err) } att.ViewUrl = attResp.ViewURL bmsg.Attachment = att return bmsg, nil } func attachmentFromMixinJSON(jsonString string) (att WsBroadcastMessageAttachment, err error) { var data struct { ID string `json:"attachment_id"` Size int `json:"size"` MimeType string `json:"mime_type"` Name *string `json:"name"` Duration *uint `json:"duration"` Waveform *string `json:"waveform"` Width *uint `json:"width"` Height *uint `json:"height"` Thumbnail *string `json:"thumbnail"` } err = json.Unmarshal([]byte(jsonString), &data) if err != nil { return } att.ID = data.ID att.Size = data.Size att.MimeType = data.MimeType att.Duration = data.Duration if data.Waveform != nil { att.Waveform, err = base64.StdEncoding.DecodeString(*data.Waveform) if err != nil { return } } att.Name = data.Name att.Width = data.Width att.Height = data.Height if data.Thumbnail != nil { att.Thumbnail, err = 
base64.StdEncoding.DecodeString(*data.Thumbnail) if err != nil { return } } return } func generateRewardTraceID(originTraceID string) (string, error) { h := md5.New() io.WriteString(h, originTraceID) io.WriteString(h, "REWARD") sum := h.Sum(nil) sum[6] = (sum[6] & 0x0f) | 0x30 sum[8] = (sum[8] & 0x3f) | 0x80 id, err := uuid.FromBytes(sum) return id.String(), err }
random_line_split
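generateRewardTraceID in the rows above derives a deterministic trace id from the original one by hashing it with an MD5 suffix and stamping the UUID version and variant bits. A stdlib-only Go sketch of the same derivation that formats the UUID by hand instead of calling github.com/gofrs/uuid; rewardTraceID and the sample snapshot id are illustrative.

package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// rewardTraceID hashes the original trace id plus a constant suffix, then stamps
// the UUID version (3) and RFC 4122 variant bits, matching generateRewardTraceID.
func rewardTraceID(originTraceID string) string {
	h := md5.New()
	io.WriteString(h, originTraceID)
	io.WriteString(h, "REWARD")
	sum := h.Sum(nil) // 16 bytes

	sum[6] = (sum[6] & 0x0f) | 0x30 // version 3 (name-based, MD5)
	sum[8] = (sum[8] & 0x3f) | 0x80 // RFC 4122 variant

	return fmt.Sprintf("%x-%x-%x-%x-%x", sum[0:4], sum[4:6], sum[6:8], sum[8:10], sum[10:16])
}

func main() {
	// Deterministic: the same input always yields the same transfer trace id.
	fmt.Println(rewardTraceID("5a1b6804-3061-4c71-9ee0-dd8dd93c787e"))
	fmt.Println(rewardTraceID("5a1b6804-3061-4c71-9ee0-dd8dd93c787e"))
}

Because the Mixin transfer API treats trace_id as an idempotency key, deriving it deterministically from the incoming payment means a replayed snapshot cannot trigger a second payout.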
message.go
package services import ( "context" "crypto/md5" "encoding/base64" "encoding/json" "errors" "fmt" "io" "log" "strconv" "strings" "time" "unicode/utf8" bot "github.com/MixinNetwork/bot-api-go-client" number "github.com/MixinNetwork/go-number" "github.com/MixinNetwork/supergroup.mixin.one/config" "github.com/MixinNetwork/supergroup.mixin.one/models" "github.com/MixinNetwork/supergroup.mixin.one/plugin" "github.com/MixinNetwork/supergroup.mixin.one/session" "github.com/fox-one/mixin-sdk" "github.com/gofrs/uuid" ) type TransferView struct { Type string `json:"type"` SnapshotId string `json:"snapshot_id"` CounterUserId string `json:"counter_user_id"` AssetId string `json:"asset_id"` Amount string `json:"amount"` TraceId string `json:"trace_id"` Memo string `json:"memo"` CreatedAt time.Time `json:"created_at"` } type MessageService struct{} type MessageContext struct { user *mixin.User bc chan WsBroadcastMessage recipientID map[string]time.Time } func (mc *MessageContext) OnMessage(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Category == "SYSTEM_ACCOUNT_SNAPSHOT" && msg.UserID != config.AppConfig.Mixin.ClientId { data, err := base64.StdEncoding.DecodeString(msg.Data) if err != nil { return session.BlazeServerError(ctx, err) } var transfer TransferView err = json.Unmarshal(data, &transfer) if err != nil { return session.BlazeServerError(ctx, err) } err = handleTransfer(ctx, mc, transfer, msg.UserID) if err != nil { return session.BlazeServerError(ctx, err) } } else if msg.ConversationID == models.UniqueConversationId(config.AppConfig.Mixin.ClientId, msg.UserID) { if err := handleMessage(ctx, mc, msg, mc.bc); err != nil { return err } } return nil } func (mc *MessageContext) OnAckReceipt(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Status != "READ" { return nil } id, err := models.FindDistributedMessageRecipientId(ctx, msg.MessageID) if err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err) return nil } if id == "" { return nil } if time.Since(mc.recipientID[id]) > models.UserActivePeriod { if err := models.PingUserActiveAt(ctx, id); err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err) } mc.recipientID[id] = time.Now() } return nil } type TransferMemoInst struct { Action string `json:"a"` Param1 string `json:"p1"` Param2 string `json:"p2"` } func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error { go distribute(ctx) go loopPendingMessage(ctx) go handlePendingParticipants(ctx) go handleExpiredPackets(ctx) go schedulePluginCronJob(ctx) user, err := mixin.NewUser( config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, ) if err != nil { panic(err) } mc := &MessageContext{ user: user, bc: broadcastChan, recipientID: map[string]time.Time{}, } for { b := mixin.NewBlazeClient(user) if err := b.Loop(ctx, mc); err != nil { session.Logger(ctx).Error(err) } session.Logger(ctx).Info("connection loop end") time.Sleep(300 * time.Millisecond) } } func handleTransfer(ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error { id, err := bot.UuidFromString(transfer.TraceId) if err != nil { return nil } user, err := models.FindUser(ctx, userId) if user == nil || err != nil { log.Println("No such a user", userId) return err } if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" { if inst.Action == "rewards" 
{ return handleRewardsPayment(ctx, mc, transfer, user, inst) } else { log.Println("Unknown instruction", inst) } } else { log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err) if user.TraceId == transfer.TraceId { log.Println("New legacy payment", userId, transfer.TraceId) if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId { return user.Payment(ctx) } for _, asset := range config.AppConfig.System.AccpetPaymentAssetList { if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId { return user.Payment(ctx) } } } else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil { log.Println("New order received", userId, transfer.TraceId) return handleOrderPayment(ctx, mc, transfer, order) } else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil { log.Println("New packet paid", userId, transfer.TraceId, id) return err } else if packet.State == models.PacketStatePaid { log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId) return sendAppCard(ctx, mc, packet) } } return nil } func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) { var data *TransferMemoInst err := json.Unmarshal([]byte(transfer.Memo), &data) return data, err } func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error
func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error { if order.PayMethod == models.PayMethodMixin && number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) && order.AssetId == transfer.AssetId { _, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId) if err != nil { log.Println(err) return err } } return nil } func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error { description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, packet.User.FullName) if strings.TrimSpace(packet.User.FullName) == "" { description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc } if count := utf8.RuneCountInString(description); count > 100 { name := string([]rune(packet.User.FullName)[:16]) description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name) } host := config.AppConfig.Service.HTTPResourceHost if config.AppConfig.System.RouterMode == config.RouterModeHash { host = host + config.RouterModeHashSymbol } card, err := json.Marshal(map[string]string{ "icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256", "title": config.AppConfig.MessageTemplate.GroupRedPacket, "description": description, "action": host + "/packets/" + packet.PacketId, }) if err != nil { return session.BlazeServerError(ctx, err) } t := time.Now() u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()} _, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t) if err != nil { return session.BlazeServerError(ctx, err) } return nil } func handleExpiredPackets(ctx context.Context) { var limit = 100 for { packetIds, err := models.ListExpiredPackets(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, id := range packetIds { packet, err := models.SendPacketRefundTransfer(ctx, id) if err != nil { session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err) break } if packet != nil { session.Logger(ctx).Infof("REFUND %v\n", id) } } if len(packetIds) < limit { time.Sleep(300 * time.Millisecond) continue } } } func schedulePluginCronJob(ctx context.Context) { plugin.RunCron() } func handlePendingParticipants(ctx context.Context) { var limit = 100 for { participants, err := models.ListPendingParticipants(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, p := range participants { err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount) if err != nil { session.Logger(ctx).Error(err) break } } if len(participants) < limit { time.Sleep(300 * time.Millisecond) continue } } } func handleMessage(ctx context.Context, mc *MessageContext, message *mixin.MessageView, broadcastChan chan WsBroadcastMessage) error { user, err := models.FindUser(ctx, message.UserID) if err != nil { return err } if user == nil || user.State != models.PaymentStatePaid { return sendHelpMessage(ctx, user, mc, message) } if time.Since(user.ActiveAt) > models.UserActivePeriod { err = models.PingUserActiveAt(ctx, user.UserId) if err != nil { session.Logger(ctx).Error("handleMessage PingUserActiveAt", err) } } if user.SubscribedAt.IsZero() { return sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsUnsubscribe) } dataBytes, err := 
base64.StdEncoding.DecodeString(message.Data) if err != nil { return session.BadDataError(ctx) } else if len(dataBytes) < 10 { if strings.ToUpper(string(dataBytes)) == config.AppConfig.MessageTemplate.MessageCommandsInfo { if count, err := models.SubscribersCount(ctx); err != nil { return err } else { return sendTextMessage(ctx, mc, message.ConversationID, fmt.Sprintf(config.AppConfig.MessageTemplate.MessageCommandsInfoResp, count)) } } } // broadcast if isBroadcastOn, err := models.ReadBroadcastProperty(ctx); err == nil && isBroadcastOn == "on" { go func() { if bmsg, err := decodeMessage(ctx, user, message); err == nil { broadcastChan <- bmsg } }() } if _, err := models.CreateMessage(ctx, user, message.MessageID, message.Category, message.QuoteMessageID, message.Data, message.CreatedAt, message.UpdatedAt); err != nil { return err } return nil } func sendHelpMessage(ctx context.Context, user *models.User, mc *MessageContext, message *mixin.MessageView) error { if err := sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsHelp); err != nil { return err } if err := sendAppButton(ctx, mc, config.AppConfig.MessageTemplate.MessageTipsHelpBtn, message.ConversationID, config.AppConfig.Service.HTTPResourceHost); err != nil { return err } return nil } func decodeMessage(ctx context.Context, user *models.User, message *mixin.MessageView) (WsBroadcastMessage, error) { var bmsg WsBroadcastMessage bmsg.Category = message.Category bmsg.MessageId = message.MessageID bmsg.CreatedAt = message.UpdatedAt bmsg.Data = message.Data bmsg.SpeakerId = user.UserId bmsg.SpeakerName = user.FullName bmsg.SpeakerAvatar = user.AvatarURL if message.Category == "PLAIN_TEXT" { bytes, _ := base64.StdEncoding.DecodeString(message.Data) bmsg.Text = string(bytes) return bmsg, nil } if message.Category != "PLAIN_IMAGE" && message.Category != "PLAIN_VIDEO" && message.Category != "PLAIN_AUDIO" && message.Category != "PLAIN_DATA" { return bmsg, nil } data, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { log.Println("message data decode error", err) return bmsg, err } att, err := attachmentFromMixinJSON(string(data)) if err != nil { log.Println("decode attachment error", err) return bmsg, err } attResp, err := bot.AttachemntShow(ctx, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, att.ID) if err != nil { log.Println("get attachment details error", err) } att.ViewUrl = attResp.ViewURL bmsg.Attachment = att return bmsg, nil } func attachmentFromMixinJSON(jsonString string) (att WsBroadcastMessageAttachment, err error) { var data struct { ID string `json:"attachment_id"` Size int `json:"size"` MimeType string `json:"mime_type"` Name *string `json:"name"` Duration *uint `json:"duration"` Waveform *string `json:"waveform"` Width *uint `json:"width"` Height *uint `json:"height"` Thumbnail *string `json:"thumbnail"` } err = json.Unmarshal([]byte(jsonString), &data) if err != nil { return } att.ID = data.ID att.Size = data.Size att.MimeType = data.MimeType att.Duration = data.Duration if data.Waveform != nil { att.Waveform, err = base64.StdEncoding.DecodeString(*data.Waveform) if err != nil { return } } att.Name = data.Name att.Width = data.Width att.Height = data.Height if data.Thumbnail != nil { att.Thumbnail, err = base64.StdEncoding.DecodeString(*data.Thumbnail) if err != nil { return } } return } func generateRewardTraceID(originTraceID string) (string, error) { h := md5.New() io.WriteString(h, originTraceID) 
io.WriteString(h, "REWARD") sum := h.Sum(nil) sum[6] = (sum[6] & 0x0f) | 0x30 sum[8] = (sum[8] & 0x3f) | 0x80 id, err := uuid.FromBytes(sum) return id.String(), err }
{ userId := inst.Param1 targetUser, err := models.FindUser(ctx, userId) if err != nil { log.Println("can't find user to reward", userId, err) return nil } memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10) log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId) traceID, err := generateRewardTraceID(transfer.SnapshotId) if err != nil { return errors.New("generate trace id failed") } in := &bot.TransferInput{ AssetId: transfer.AssetId, RecipientId: targetUser.UserId, Amount: number.FromString(transfer.Amount), TraceId: traceID, Memo: memo, } if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil { log.Println("can't transfer to recipient", err) return err } if user.UserId != targetUser.UserId { if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil { log.Println("can't record tip", err) // return err } if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil { log.Println("can't create rewards message", err) // return err } } return nil }
identifier_body
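The suffix of this row ends with generateRewardTraceID, which turns the incoming snapshot ID into a deterministic trace for the outgoing reward transfer: MD5 of the ID plus a fixed "REWARD" salt, with the UUID version-3 and RFC 4122 variant bits forced so the digest is a valid UUID. A minimal, self-contained sketch of that derivation (same github.com/gofrs/uuid dependency as the imports in the next row; the demo snapshot ID is made up):

package main

import (
	"crypto/md5"
	"fmt"
	"io"

	"github.com/gofrs/uuid"
)

// rewardTraceID mirrors generateRewardTraceID above: hash the original
// trace with a fixed "REWARD" salt, then stamp the version and variant
// bits so the 16-byte digest is a valid, deterministic UUID.
func rewardTraceID(originTraceID string) (string, error) {
	h := md5.New()
	io.WriteString(h, originTraceID)
	io.WriteString(h, "REWARD")
	sum := h.Sum(nil)
	sum[6] = (sum[6] & 0x0f) | 0x30 // version 3 (name-based, MD5)
	sum[8] = (sum[8] & 0x3f) | 0x80 // RFC 4122 variant
	id, err := uuid.FromBytes(sum)
	if err != nil {
		return "", err
	}
	return id.String(), nil
}

func main() {
	// Hypothetical snapshot ID: the same input always yields the same trace.
	a, _ := rewardTraceID("9f3c6d2e-demo-snapshot-id")
	b, _ := rewardTraceID("9f3c6d2e-demo-snapshot-id")
	fmt.Println(a == b, a)
}

Because the mapping is pure, retrying the same reward produces the same trace ID, which is what makes the payout safe to retry against the transfer API.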
message.go
package services import ( "context" "crypto/md5" "encoding/base64" "encoding/json" "errors" "fmt" "io" "log" "strconv" "strings" "time" "unicode/utf8" bot "github.com/MixinNetwork/bot-api-go-client" number "github.com/MixinNetwork/go-number" "github.com/MixinNetwork/supergroup.mixin.one/config" "github.com/MixinNetwork/supergroup.mixin.one/models" "github.com/MixinNetwork/supergroup.mixin.one/plugin" "github.com/MixinNetwork/supergroup.mixin.one/session" "github.com/fox-one/mixin-sdk" "github.com/gofrs/uuid" ) type TransferView struct { Type string `json:"type"` SnapshotId string `json:"snapshot_id"` CounterUserId string `json:"counter_user_id"` AssetId string `json:"asset_id"` Amount string `json:"amount"` TraceId string `json:"trace_id"` Memo string `json:"memo"` CreatedAt time.Time `json:"created_at"` } type MessageService struct{} type MessageContext struct { user *mixin.User bc chan WsBroadcastMessage recipientID map[string]time.Time } func (mc *MessageContext) OnMessage(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Category == "SYSTEM_ACCOUNT_SNAPSHOT" && msg.UserID != config.AppConfig.Mixin.ClientId { data, err := base64.StdEncoding.DecodeString(msg.Data) if err != nil { return session.BlazeServerError(ctx, err) } var transfer TransferView err = json.Unmarshal(data, &transfer) if err != nil { return session.BlazeServerError(ctx, err) } err = handleTransfer(ctx, mc, transfer, msg.UserID) if err != nil { return session.BlazeServerError(ctx, err) } } else if msg.ConversationID == models.UniqueConversationId(config.AppConfig.Mixin.ClientId, msg.UserID) { if err := handleMessage(ctx, mc, msg, mc.bc); err != nil { return err } } return nil } func (mc *MessageContext) OnAckReceipt(ctx context.Context, msg *mixin.MessageView, userID string) error { if msg.Status != "READ" { return nil } id, err := models.FindDistributedMessageRecipientId(ctx, msg.MessageID) if err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT FindDistributedMessageRecipientId", err) return nil } if id == "" { return nil } if time.Since(mc.recipientID[id]) > models.UserActivePeriod { if err := models.PingUserActiveAt(ctx, id); err != nil { session.Logger(ctx).Error("ACKNOWLEDGE_MESSAGE_RECEIPT PingUserActiveAt", err) } mc.recipientID[id] = time.Now() } return nil } type TransferMemoInst struct { Action string `json:"a"` Param1 string `json:"p1"` Param2 string `json:"p2"` } func (service *MessageService) Run(ctx context.Context, broadcastChan chan WsBroadcastMessage) error { go distribute(ctx) go loopPendingMessage(ctx) go handlePendingParticipants(ctx) go handleExpiredPackets(ctx) go schedulePluginCronJob(ctx) user, err := mixin.NewUser( config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, ) if err != nil { panic(err) } mc := &MessageContext{ user: user, bc: broadcastChan, recipientID: map[string]time.Time{}, } for { b := mixin.NewBlazeClient(user) if err := b.Loop(ctx, mc); err != nil { session.Logger(ctx).Error(err) } session.Logger(ctx).Info("connection loop end") time.Sleep(300 * time.Millisecond) } } func handleTransfer(ctx context.Context, mc *MessageContext, transfer TransferView, userId string) error { id, err := bot.UuidFromString(transfer.TraceId) if err != nil { return nil } user, err := models.FindUser(ctx, userId) if user == nil || err != nil { log.Println("No such a user", userId) return err } if inst, err := crackTransferProtocol(ctx, mc, transfer, user); err == nil && inst.Action != "" { if inst.Action == "rewards" 
{ return handleRewardsPayment(ctx, mc, transfer, user, inst) } else { log.Println("Unknown instruction", inst) } } else { log.Println("Incorrect inst, fallback: ", transfer.TraceId, transfer.Memo, err) if user.TraceId == transfer.TraceId { log.Println("New legacy payment", userId, transfer.TraceId) if transfer.Amount == config.AppConfig.System.PaymentAmount && transfer.AssetId == config.AppConfig.System.PaymentAssetId { return user.Payment(ctx) } for _, asset := range config.AppConfig.System.AccpetPaymentAssetList { if number.FromString(transfer.Amount).Equal(number.FromString(asset.Amount).RoundFloor(8)) && transfer.AssetId == asset.AssetId { return user.Payment(ctx) } } } else if order, err := models.GetOrder(ctx, transfer.TraceId); err == nil && order != nil { log.Println("New order received", userId, transfer.TraceId) return handleOrderPayment(ctx, mc, transfer, order) } else if packet, err := models.PayPacket(ctx, id.String(), transfer.AssetId, transfer.Amount); err != nil || packet == nil { log.Println("New packet paid", userId, transfer.TraceId, id) return err } else if packet.State == models.PacketStatePaid { log.Println("New packet prepared", userId, transfer.TraceId, packet.PacketId) return sendAppCard(ctx, mc, packet) } } return nil } func crackTransferProtocol(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User) (*TransferMemoInst, error) { var data *TransferMemoInst err := json.Unmarshal([]byte(transfer.Memo), &data) return data, err } func handleRewardsPayment(ctx context.Context, mc *MessageContext, transfer TransferView, user *models.User, inst *TransferMemoInst) error { userId := inst.Param1 targetUser, err := models.FindUser(ctx, userId) if err != nil { log.Println("can't find user to reward", userId, err) return nil } memo := "Rewards from " + strconv.FormatInt(user.IdentityNumber, 10) log.Println("Rewards from " + user.FullName + " to " + targetUser.UserId + " with traceID " + transfer.SnapshotId) var traceID string traceID = transfer.SnapshotId if err != nil { return errors.New("generate trace id failed") } in := &bot.TransferInput{ AssetId: transfer.AssetId, RecipientId: targetUser.UserId, Amount: number.FromString(transfer.Amount), TraceId: traceID, Memo: memo, } if err := bot.CreateTransfer(ctx, in, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, config.AppConfig.Mixin.SessionAssetPIN, config.AppConfig.Mixin.PinToken); err != nil { log.Println("can't transfer to recipient", err) return err } if user.UserId != targetUser.UserId { if err := models.CreateTip(ctx, user.UserId, targetUser.UserId, transfer.AssetId, transfer.Amount, traceID, transfer.CreatedAt); err != nil { log.Println("can't record tip", err) // return err } if err := models.CreateRewardsMessage(ctx, user, targetUser, transfer.Amount, inst.Param2); err != nil { log.Println("can't create rewards message", err) // return err } } return nil } func handleOrderPayment(ctx context.Context, mc *MessageContext, transfer TransferView, order *models.Order) error { if order.PayMethod == models.PayMethodMixin && number.FromString(transfer.Amount).Equal(number.FromString(order.Amount).RoundFloor(8)) && order.AssetId == transfer.AssetId { _, err := models.MarkOrderAsPaidByOrderId(ctx, order.OrderId) if err != nil { log.Println(err) return err } } return nil } func sendAppCard(ctx context.Context, mc *MessageContext, packet *models.Packet) error { description := fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, 
packet.User.FullName) if strings.TrimSpace(packet.User.FullName) == "" { description = config.AppConfig.MessageTemplate.GroupRedPacketShortDesc } if count := utf8.RuneCountInString(description); count > 100 { name := string([]rune(packet.User.FullName)[:16]) description = fmt.Sprintf(config.AppConfig.MessageTemplate.GroupRedPacketDesc, name) } host := config.AppConfig.Service.HTTPResourceHost if config.AppConfig.System.RouterMode == config.RouterModeHash { host = host + config.RouterModeHashSymbol } card, err := json.Marshal(map[string]string{ "icon_url": "https://images.mixin.one/X44V48LK9oEBT3izRGKqdVSPfiH5DtYTzzF0ch5nP-f7tO4v0BTTqVhFEHqd52qUeuVas-BSkLH1ckxEI51-jXmF=s256", "title": config.AppConfig.MessageTemplate.GroupRedPacket, "description": description, "action": host + "/packets/" + packet.PacketId, }) if err != nil { return session.BlazeServerError(ctx, err) } t := time.Now() u := &models.User{UserId: config.AppConfig.Mixin.ClientId, ActiveAt: time.Now()} _, err = models.CreateMessage(ctx, u, packet.PacketId, models.MessageCategoryAppCard, "", base64.StdEncoding.EncodeToString(card), t, t) if err != nil { return session.BlazeServerError(ctx, err) } return nil } func handleExpiredPackets(ctx context.Context) { var limit = 100 for { packetIds, err := models.ListExpiredPackets(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, id := range packetIds { packet, err := models.SendPacketRefundTransfer(ctx, id) if err != nil { session.Logger(ctx).Infof("REFUND ERROR %v, %v\n", id, err) break } if packet != nil { session.Logger(ctx).Infof("REFUND %v\n", id) } } if len(packetIds) < limit { time.Sleep(300 * time.Millisecond) continue } } } func schedulePluginCronJob(ctx context.Context) { plugin.RunCron() } func handlePendingParticipants(ctx context.Context) { var limit = 100 for { participants, err := models.ListPendingParticipants(ctx, limit) if err != nil { session.Logger(ctx).Error(err) time.Sleep(300 * time.Millisecond) continue } for _, p := range participants { err = models.SendParticipantTransfer(ctx, p.PacketId, p.UserId, p.Amount) if err != nil { session.Logger(ctx).Error(err) break } } if len(participants) < limit { time.Sleep(300 * time.Millisecond) continue } } } func handleMessage(ctx context.Context, mc *MessageContext, message *mixin.MessageView, broadcastChan chan WsBroadcastMessage) error { user, err := models.FindUser(ctx, message.UserID) if err != nil { return err } if user == nil || user.State != models.PaymentStatePaid { return sendHelpMessage(ctx, user, mc, message) } if time.Since(user.ActiveAt) > models.UserActivePeriod { err = models.PingUserActiveAt(ctx, user.UserId) if err != nil { session.Logger(ctx).Error("handleMessage PingUserActiveAt", err) } } if user.SubscribedAt.IsZero() { return sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsUnsubscribe) } dataBytes, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { return session.BadDataError(ctx) } else if len(dataBytes) < 10 { if strings.ToUpper(string(dataBytes)) == config.AppConfig.MessageTemplate.MessageCommandsInfo { if count, err := models.SubscribersCount(ctx); err != nil { return err } else { return sendTextMessage(ctx, mc, message.ConversationID, fmt.Sprintf(config.AppConfig.MessageTemplate.MessageCommandsInfoResp, count)) } } } // broadcast if isBroadcastOn, err := models.ReadBroadcastProperty(ctx); err == nil && isBroadcastOn == "on" { go func() { if bmsg, err := decodeMessage(ctx, user, 
message); err == nil
}() } if _, err := models.CreateMessage(ctx, user, message.MessageID, message.Category, message.QuoteMessageID, message.Data, message.CreatedAt, message.UpdatedAt); err != nil { return err } return nil } func sendHelpMessage(ctx context.Context, user *models.User, mc *MessageContext, message *mixin.MessageView) error { if err := sendTextMessage(ctx, mc, message.ConversationID, config.AppConfig.MessageTemplate.MessageTipsHelp); err != nil { return err } if err := sendAppButton(ctx, mc, config.AppConfig.MessageTemplate.MessageTipsHelpBtn, message.ConversationID, config.AppConfig.Service.HTTPResourceHost); err != nil { return err } return nil } func decodeMessage(ctx context.Context, user *models.User, message *mixin.MessageView) (WsBroadcastMessage, error) { var bmsg WsBroadcastMessage bmsg.Category = message.Category bmsg.MessageId = message.MessageID bmsg.CreatedAt = message.UpdatedAt bmsg.Data = message.Data bmsg.SpeakerId = user.UserId bmsg.SpeakerName = user.FullName bmsg.SpeakerAvatar = user.AvatarURL if message.Category == "PLAIN_TEXT" { bytes, _ := base64.StdEncoding.DecodeString(message.Data) bmsg.Text = string(bytes) return bmsg, nil } if message.Category != "PLAIN_IMAGE" && message.Category != "PLAIN_VIDEO" && message.Category != "PLAIN_AUDIO" && message.Category != "PLAIN_DATA" { return bmsg, nil } data, err := base64.StdEncoding.DecodeString(message.Data) if err != nil { log.Println("message data decode error", err) return bmsg, err } att, err := attachmentFromMixinJSON(string(data)) if err != nil { log.Println("decode attachment error", err) return bmsg, err } attResp, err := bot.AttachemntShow(ctx, config.AppConfig.Mixin.ClientId, config.AppConfig.Mixin.SessionId, config.AppConfig.Mixin.SessionKey, att.ID) if err != nil { log.Println("get attachment details error", err) } att.ViewUrl = attResp.ViewURL bmsg.Attachment = att return bmsg, nil } func attachmentFromMixinJSON(jsonString string) (att WsBroadcastMessageAttachment, err error) { var data struct { ID string `json:"attachment_id"` Size int `json:"size"` MimeType string `json:"mime_type"` Name *string `json:"name"` Duration *uint `json:"duration"` Waveform *string `json:"waveform"` Width *uint `json:"width"` Height *uint `json:"height"` Thumbnail *string `json:"thumbnail"` } err = json.Unmarshal([]byte(jsonString), &data) if err != nil { return } att.ID = data.ID att.Size = data.Size att.MimeType = data.MimeType att.Duration = data.Duration if data.Waveform != nil { att.Waveform, err = base64.StdEncoding.DecodeString(*data.Waveform) if err != nil { return } } att.Name = data.Name att.Width = data.Width att.Height = data.Height if data.Thumbnail != nil { att.Thumbnail, err = base64.StdEncoding.DecodeString(*data.Thumbnail) if err != nil { return } } return } func generateRewardTraceID(originTraceID string) (string, error) { h := md5.New() io.WriteString(h, originTraceID) io.WriteString(h, "REWARD") sum := h.Sum(nil) sum[6] = (sum[6] & 0x0f) | 0x30 sum[8] = (sum[8] & 0x3f) | 0x80 id, err := uuid.FromBytes(sum) return id.String(), err }
{ broadcastChan <- bmsg }
conditional_block
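This row's middle is the guarded send into broadcastChan after the broadcast property check. One common variation, sketched below with stand-in types (wsBroadcastMessage here is a placeholder, not the project's WsBroadcastMessage), is to give the channel a buffer and send non-blockingly so a slow websocket consumer can never stall the message loop:

package main

import (
	"fmt"
	"time"
)

// wsBroadcastMessage is a stand-in for the project's WsBroadcastMessage.
type wsBroadcastMessage struct {
	MessageId string
	Text      string
}

// tryBroadcast never blocks: if the buffer is full because every consumer
// is slow, the message is dropped instead of stalling the caller.
func tryBroadcast(ch chan<- wsBroadcastMessage, m wsBroadcastMessage) bool {
	select {
	case ch <- m:
		return true
	default:
		return false
	}
}

func main() {
	bc := make(chan wsBroadcastMessage, 64)

	// A consumer drains the channel, e.g. fanning out to websocket clients.
	go func() {
		for m := range bc {
			fmt.Println("broadcast:", m.MessageId, m.Text)
		}
	}()

	fmt.Println("queued:", tryBroadcast(bc, wsBroadcastMessage{MessageId: "m1", Text: "hello"}))
	time.Sleep(50 * time.Millisecond) // give the consumer a moment before exiting
}

The project instead sends from a one-off goroutine, which achieves the same decoupling at the cost of one goroutine per incoming message.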
main.go
package main import ( "flag" "fmt" "golang.org/x/sys/unix" "io/ioutil" "os" "path/filepath" "strings" "syscall" "time" "unicode/utf8" ) // Key Definitions const ( DummyKey = -1 ControlA = 1 ControlB = 2 ControlC = 3 ControlE = 5 ControlF = 6 ControlH = 8 Tab = 9 Enter = 13 ControlN = 14 ControlP = 16 ControlS = 19 ControlV = 22 BackSpace = 127 ArrowUp = 1000 ArrowDown = 1001 ArrowRight = 1002 ArrowLeft = 1003 ) // Color Definition type color int const ( DummyColor color = 37 FgGreen = 32 FgCyan = 36 BgBlack = 40 BgCyan = 46 ) const ( helpMessage = "HELP: Ctrl+S = Save / Ctrl+C = Quit" ) type messageType int const ( resetMessage messageType = iota + 1 ) type Keyword string const ( Break Keyword = "break" Default = "default" Func = "func" Interface = "interface" Select = "select" Case = "case" Defer = "defer" Go = "go" Map = "map" Struct = "struct" Chan = "chan" Else = "else" Goto = "goto" Package = "package" Switch = "switch" Const = "const" Fallthrough = "fallthrough" If = "if" Range = "range" Type = "type" Continue = "continue" For = "for" Import = "import" Return = "return" Var = "var" ) var keywordColor = map[Keyword]color{ Break: FgCyan, Default: FgCyan, Interface: FgCyan, Select: FgCyan, Case: FgCyan, Defer: FgCyan, Go: FgCyan, Map: FgCyan, Struct: FgCyan, Chan: FgCyan, Else: FgCyan, Goto: FgCyan, Switch: FgCyan, Const: FgCyan, Fallthrough: FgCyan, Return: FgCyan, Range: FgCyan, Type: FgCyan, Continue: FgCyan, For: FgCyan, If: FgCyan, Package: FgCyan, Import: FgCyan, Func: FgCyan, Var: FgCyan, } type Editor struct { filePath string keyChan chan rune timeChan chan messageType crow int ccol int scroolrow int rows []*Row terminal *Terminal n int // numberOfRows debug bool // for debug } type Terminal struct { termios *unix.Termios width int height int } type Row struct { chars *GapTable // render } func (e *Editor) debugPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintln(os.Stderr, a...) } } func (e *Editor) debugDetailPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintf(os.Stderr, "%+v\n", a...) 
} } func (e *Editor) debugRowRunes() { if e.debug { i := 0 for i < e.n { _, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes()) i += 1 } } } // Terminal func makeRaw(fd int) *unix.Termios { termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA) if err != nil { panic(err) } termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON termios.Oflag &^= unix.OPOST termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN termios.Cflag &^= unix.CSIZE | unix.PARENB termios.Cflag |= unix.CS8 termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil { panic(err) } return termios } func (e *Editor) restoreTerminal(fd int) { if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil { panic(err) } } func getWindowSize(fd int) (int, int) { ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) if err != nil { panic(err) } return int(ws.Col), int(ws.Row) } func (e *Editor) initTerminal() { e.flush() e.writeHelpMenu(helpMessage) e.writeStatusBar() e.moveCursor(e.crow, e.ccol) } func (e *Editor) writeHelpMenu(message string) { prevRow, prevCol := e.crow, e.ccol for i, ch := range message { e.moveCursor(e.terminal.height+1, i) e.write([]byte(string(ch))) } for i := len(message); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height+1, i) e.write([]byte{' '}) } e.moveCursor(prevRow, prevCol) } func (e *Editor) writeStatusBar() { e.setBgColor(BgCyan) defer e.setBgColor(BgBlack) // Write file name for i, ch := range e.filePath { e.moveCursor(e.terminal.height, i) e.write([]byte(string(ch))) } // Write Spacer for i := len(e.filePath); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height, i) e.write([]byte{' '}) } } // Views func (e *Editor) write(b []byte) { syscall.Write(0, b) } func (e *Editor) writeWithColor(b []byte, colors []color) { var newBuf []byte for i, c := range colors { s := fmt.Sprintf("\033[%dm", c) newBuf = append(newBuf, []byte(s)...) newBuf = append(newBuf, b[i]) } syscall.Write(0, newBuf) } func (e *Editor) highlight(b []byte) []color { colors := make([]color, len(b)) for i := range colors { colors[i] = DummyColor } // ASCII-only ascii := string(b) // Keywords for key := range keywordColor { index := strings.Index(ascii, string(key)) if index != -1 { for i := 0; i < len(string(key)); i += 1 { colors[index+i] = keywordColor[key] } } } // String Literal isStringLit := false for i, b := range ascii { if b == '"' || isStringLit { if b == '"' { isStringLit = !isStringLit } colors[i] = FgGreen } } return colors } func (e *Editor) writeRow(r *Row) { var buf []byte for _, r := range r.chars.Runes() { buf = append(buf, []byte(string(r))...) } e.moveCursor(e.crow, 0) e.flushRow() // If the extension of fileName is .go, write with highlights. 
if filepath.Ext(e.filePath) == ".go" { colors := e.highlight(buf) e.writeWithColor(buf, colors) } else { e.write(buf) } } func (e *Editor) flush() { e.write([]byte("\033[2J")) } func (e *Editor) flushRow() { e.write([]byte("\033[2K")) } func (e *Editor) setBgColor(color color) { s := fmt.Sprintf("\033[%dm", color) e.write([]byte(s)) } func (e *Editor) moveCursor(row, col int) { s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin e.write([]byte(s)) } func (e *Editor) updateRowRunes(row *Row) { if e.crow < e.terminal.height { e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes()) e.writeRow(row) } } func (e *Editor) refreshAllRows() { for i := 0; i < e.terminal.height; i += 1 { e.crow = i e.writeRow(e.rows[e.scroolrow+i]) } } func (e *Editor) setRowPos(row int) { if row >= e.n { row = e.n - 1 } if row < 0 { if e.scroolrow > 0 { e.scroolrow -= 1 e.refreshAllRows() } row = 0 } if row >= e.terminal.height { if row+e.scroolrow <= e.n { e.scroolrow += 1 } row = e.terminal.height - 1 e.refreshAllRows() } e.crow = row e.moveCursor(row, e.ccol) } func (e *Editor) setColPos(col int) { if col < 0 { col = 0 } if col >= e.currentRow().visibleLen() { col = e.currentRow().visibleLen() } if col >= e.terminal.width { col = e.terminal.width - 1
} func (e *Editor) setRowCol(row int, col int) { if row > e.n && col > e.currentRow().visibleLen() { return } e.setRowPos(row) e.setColPos(col) } // Models func (r *Row) deleteAt(col int) { if col >= r.len() { return } r.chars.DeleteAt(col) } func (r *Row) insertAt(colPos int, newRune rune) { if colPos > r.len() { colPos = r.len() } r.chars.InsertAt(colPos, newRune) } func (r *Row) len() int { return r.chars.Len() } func (r *Row) visibleLen() int { return r.chars.VisibleLen() } func (e *Editor) currentRow() *Row { return e.rows[e.crow + e.scroolrow] } func (e *Editor) deleteRune(row *Row, col int) { row.deleteAt(col) e.updateRowRunes(row) e.setRowCol(e.crow, e.ccol - 1) } func (e *Editor) insertRune(row *Row, col int, newRune rune) { row.insertAt(col, newRune) e.updateRowRunes(row) } func (e *Editor) deleteRow(row int) { e.rows = append(e.rows[:row], e.rows[row+1:]...) e.n -= 1 prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) replaceRune(row int, newRune []rune) { gt := NewGapTable(128) for _, r := range newRune { gt.AppendRune(r) } r := &Row{ chars: gt, } e.rows[row] = r prevRowPos := e.crow e.crow = row - e.scroolrow e.updateRowRunes(r) e.crow = prevRowPos } func (e *Editor) insertRow(row int, runes []rune) { gt := NewGapTable(128) for _, r := range runes { gt.AppendRune(r) } r := &Row{ chars: gt, } // https://github.com/golang/go/wiki/SliceTricks e.rows = append(e.rows[:row], append([]*Row{ r }, e.rows[row:]...)...) e.n += 1 e.reallocBufferIfNeeded() prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) reallocBufferIfNeeded() { if e.n == len(e.rows) { newCap := cap(e.rows) * 2 newRows := make([]*Row, newCap) copy(newRows, e.rows) e.rows = newRows e.debugPrint("DEBUG: realloc occurred") } } func (e *Editor) numberOfRunesInRow() int { return e.currentRow().chars.Len() } func (e *Editor) backspace() { row := e.currentRow() if e.ccol == 0 { if e.crow + e.scroolrow > 0 { prevRowPos := e.crow + e.scroolrow - 1 prevRow := e.rows[prevRowPos] // Update the previous row. newRunes := append([]rune{}, prevRow.chars.Runes()[:prevRow.len()-1]...) newRunes = append(newRunes, row.chars.Runes()...) e.replaceRune(prevRowPos, newRunes) // Delete the current row currentRowPos := e.crow + e.scroolrow e.deleteRow(currentRowPos) e.setRowCol(e.crow - 1, prevRow.len() - 1) } } else { e.deleteRune(row, e.ccol - 1) } e.debugRowRunes() } func (e *Editor) back() { if e.ccol == 0 { if e.crow > 0 { e.setRowCol(e.crow-1, e.rows[e.crow+e.scroolrow-1].visibleLen()) } } else { e.setRowCol(e.crow, e.ccol-1) } } func (e *Editor) next() { if e.ccol >= e.currentRow().visibleLen() { if e.crow+1 < e.n { e.setRowCol(e.crow+1, 0) } } else { e.setRowCol(e.crow, e.ccol+1) } } func (e *Editor) newLine() { // Insert the new row. currentLineRowPos := e.crow + e.scroolrow currentLineRow := e.rows[currentLineRowPos] newLineRowPos := e.crow + e.scroolrow + 1 nextRowRunes := append([]rune{}, currentLineRow.chars.Runes()[e.ccol:]...) e.insertRow(newLineRowPos, nextRowRunes) // Update the current row. currentRowNewRunes := append([]rune{}, currentLineRow.chars.Runes()[:e.ccol]...) 
currentRowNewRunes = append(currentRowNewRunes, '\n') e.replaceRune(e.crow + e.scroolrow, currentRowNewRunes) e.setRowCol(e.crow + 1, 0) e.debugRowRunes() } func existsFile(filename string) bool { _, err := os.Stat(filename) return err == nil } func saveFile(filePath string, rows []*Row) { sb := strings.Builder{} for _, r := range rows { if r.len() >= 1 { for _, ch := range r.chars.Runes() { sb.WriteRune(ch) } } } _ = ioutil.WriteFile(filePath, []byte(sb.String()), 0644) } func loadFile(filePath string) *Editor { e := &Editor{ crow: 0, ccol: 0, scroolrow: 0, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), n: 1, } rows := makeRows() bytes, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } gt := NewGapTable(128) for _, b := range bytes { // Treat TAB as 4 spaces. if b == Tab { gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) continue } // ASCII-only gt.AppendRune(rune(b)) if b == '\n' { rows[e.n-1] = &Row{chars: gt} e.n += 1 gt = NewGapTable(128) } } rows[e.n-1] = &Row{chars: gt} e.rows = rows return e } func (e *Editor) exit() { e.restoreTerminal(0) } func (e *Editor) parseKey(b []byte) (rune, int) { // Try parsing escape sequence if len(b) == 3 { if b[0] == byte(27) && b[1] == '[' { switch b[2] { case 'A': return ArrowUp, 3 case 'B': return ArrowDown, 3 case 'C': return ArrowRight, 3 case 'D': return ArrowLeft, 3 default: return DummyKey, 0 } } } // parse bytes as UTF-8. return utf8.DecodeRune(b) } func (e *Editor) readKeys() { buf := make([]byte, 64) for { if n, err := syscall.Read(0, buf); err == nil { b := buf[:n] for { r, n := e.parseKey(b) if n == 0 { break } e.keyChan <- r b = b[n:] } } } } func (e *Editor) interpretKey() { for { r := <-e.keyChan switch r { case ControlA: e.setRowCol(e.crow, 0) case ControlB, ArrowLeft: e.back() case ControlC: e.exit() return case ControlE: e.setRowCol(e.crow, e.numberOfRunesInRow()) case ControlF, ArrowRight: e.next() case ControlH, BackSpace: e.backspace() case ControlN, ArrowDown: e.setRowCol(e.crow+1, e.ccol) case Tab: for i := 0; i < 4; i += 1 { e.insertRune(e.currentRow(), e.ccol, rune(' ')) } e.setColPos(e.ccol + 4) case Enter: e.newLine() case ControlS: saveFile(e.filePath, e.rows) e.writeHelpMenu("Saved!") e.timeChan <- resetMessage case ControlP, ArrowUp: e.setRowCol(e.crow-1, e.ccol) // for debug case ControlV: e.debugDetailPrint(e) default: e.insertRune(e.currentRow(), e.ccol, r) e.setColPos(e.ccol + 1) } } } func (e *Editor) pollTimerEvent() { for { switch <-e.timeChan { case resetMessage: t := time.NewTimer(2 * time.Second) <-t.C e.writeHelpMenu(helpMessage) } } } func newTerminal(fd int) *Terminal { termios := makeRaw(fd) width, height := getWindowSize(fd) terminal := &Terminal{ termios: termios, width: width, height: height - 2, // for status, message bar } return terminal } func makeRows() []*Row { var rows = make([]*Row, 1024) // not good for i := range rows { rows[i] = &Row{ chars: NewGapTable(128), } } return rows } func newEditor(filePath string, debug bool) *Editor { terminal := newTerminal(0) if existsFile(filePath) { e := loadFile(filePath) e.debug = debug e.terminal = terminal return e } rows := makeRows() return &Editor{ crow: 0, ccol: 0, scroolrow: 0, rows: rows, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), terminal: terminal, n: 1, debug: debug, } } func run(filePath string, debug bool) { e := newEditor(filePath, debug) e.initTerminal() e.refreshAllRows() e.setRowCol(0, 0) go e.readKeys() go 
e.pollTimerEvent() e.interpretKey() } func main() { flag.Parse() if flag.NArg() < 1 || flag.NArg() > 3 { fmt.Println("Usage: mille <filename> [--debug]") return } debug := flag.NArg() == 2 && flag.Arg(1) == "--debug" run(flag.Arg(0), debug) }
} e.ccol = col e.moveCursor(e.crow, e.ccol)
random_line_split
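The main.go rows that follow are a small terminal editor whose view layer is raw ANSI escape sequences: \033[2J clears the screen, \033[2K clears the current row, \033[<row>;<col>H moves the cursor (1-origin, hence the +1 in moveCursor), and \033[<n>m switches colours. A self-contained sketch of those primitives, independent of the editor's own types:

package main

import "fmt"

// moveTo emits the CSI cursor-position sequence; terminals are 1-origin,
// so 0-origin coordinates get the same +1 shift the editor applies.
func moveTo(row, col int) {
	fmt.Printf("\033[%d;%dH", row+1, col+1)
}

func main() {
	fmt.Print("\033[2J") // clear the whole screen
	moveTo(0, 0)
	fmt.Print("\033[46m demo.go \033[0m") // cyan status-bar style, then reset
	moveTo(2, 0)
	fmt.Print("\033[2K") // wipe the row before redrawing it
	fmt.Println("\033[36mfunc\033[0m main() { ... }") // keyword in cyan
}

Running it scribbles directly on the current terminal, so it is best tried in a throwaway shell.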
main.go
package main import ( "flag" "fmt" "golang.org/x/sys/unix" "io/ioutil" "os" "path/filepath" "strings" "syscall" "time" "unicode/utf8" ) // Key Definitions const ( DummyKey = -1 ControlA = 1 ControlB = 2 ControlC = 3 ControlE = 5 ControlF = 6 ControlH = 8 Tab = 9 Enter = 13 ControlN = 14 ControlP = 16 ControlS = 19 ControlV = 22 BackSpace = 127 ArrowUp = 1000 ArrowDown = 1001 ArrowRight = 1002 ArrowLeft = 1003 ) // Color Definition type color int const ( DummyColor color = 37 FgGreen = 32 FgCyan = 36 BgBlack = 40 BgCyan = 46 ) const ( helpMessage = "HELP: Ctrl+S = Save / Ctrl+C = Quit" ) type messageType int const ( resetMessage messageType = iota + 1 ) type Keyword string const ( Break Keyword = "break" Default = "default" Func = "func" Interface = "interface" Select = "select" Case = "case" Defer = "defer" Go = "go" Map = "map" Struct = "struct" Chan = "chan" Else = "else" Goto = "goto" Package = "package" Switch = "switch" Const = "const" Fallthrough = "fallthrough" If = "if" Range = "range" Type = "type" Continue = "continue" For = "for" Import = "import" Return = "return" Var = "var" ) var keywordColor = map[Keyword]color{ Break: FgCyan, Default: FgCyan, Interface: FgCyan, Select: FgCyan, Case: FgCyan, Defer: FgCyan, Go: FgCyan, Map: FgCyan, Struct: FgCyan, Chan: FgCyan, Else: FgCyan, Goto: FgCyan, Switch: FgCyan, Const: FgCyan, Fallthrough: FgCyan, Return: FgCyan, Range: FgCyan, Type: FgCyan, Continue: FgCyan, For: FgCyan, If: FgCyan, Package: FgCyan, Import: FgCyan, Func: FgCyan, Var: FgCyan, } type Editor struct { filePath string keyChan chan rune timeChan chan messageType crow int ccol int scroolrow int rows []*Row terminal *Terminal n int // numberOfRows debug bool // for debug } type Terminal struct { termios *unix.Termios width int height int } type Row struct { chars *GapTable // render } func (e *Editor) debugPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintln(os.Stderr, a...) } } func (e *Editor) debugDetailPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintf(os.Stderr, "%+v\n", a...) 
} } func (e *Editor) debugRowRunes() { if e.debug { i := 0 for i < e.n { _, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes()) i += 1 } } } // Terminal func makeRaw(fd int) *unix.Termios { termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA) if err != nil { panic(err) } termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON termios.Oflag &^= unix.OPOST termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN termios.Cflag &^= unix.CSIZE | unix.PARENB termios.Cflag |= unix.CS8 termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil { panic(err) } return termios } func (e *Editor) restoreTerminal(fd int) { if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil { panic(err) } } func getWindowSize(fd int) (int, int) { ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) if err != nil { panic(err) } return int(ws.Col), int(ws.Row) } func (e *Editor) initTerminal() { e.flush() e.writeHelpMenu(helpMessage) e.writeStatusBar() e.moveCursor(e.crow, e.ccol) } func (e *Editor) writeHelpMenu(message string) { prevRow, prevCol := e.crow, e.ccol for i, ch := range message { e.moveCursor(e.terminal.height+1, i) e.write([]byte(string(ch))) } for i := len(message); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height+1, i) e.write([]byte{' '}) } e.moveCursor(prevRow, prevCol) } func (e *Editor) writeStatusBar() { e.setBgColor(BgCyan) defer e.setBgColor(BgBlack) // Write file name for i, ch := range e.filePath { e.moveCursor(e.terminal.height, i) e.write([]byte(string(ch))) } // Write Spacer for i := len(e.filePath); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height, i) e.write([]byte{' '}) } } // Views func (e *Editor) write(b []byte) { syscall.Write(0, b) } func (e *Editor) writeWithColor(b []byte, colors []color) { var newBuf []byte for i, c := range colors { s := fmt.Sprintf("\033[%dm", c) newBuf = append(newBuf, []byte(s)...) newBuf = append(newBuf, b[i]) } syscall.Write(0, newBuf) } func (e *Editor) highlight(b []byte) []color { colors := make([]color, len(b)) for i := range colors { colors[i] = DummyColor } // ASCII-only ascii := string(b) // Keywords for key := range keywordColor { index := strings.Index(ascii, string(key)) if index != -1 { for i := 0; i < len(string(key)); i += 1 { colors[index+i] = keywordColor[key] } } } // String Literal isStringLit := false for i, b := range ascii { if b == '"' || isStringLit { if b == '"' { isStringLit = !isStringLit } colors[i] = FgGreen } } return colors } func (e *Editor) writeRow(r *Row) { var buf []byte for _, r := range r.chars.Runes() { buf = append(buf, []byte(string(r))...) } e.moveCursor(e.crow, 0) e.flushRow() // If the extension of fileName is .go, write with highlights. 
if filepath.Ext(e.filePath) == ".go" { colors := e.highlight(buf) e.writeWithColor(buf, colors) } else { e.write(buf) } } func (e *Editor) flush() { e.write([]byte("\033[2J")) } func (e *Editor) flushRow() { e.write([]byte("\033[2K")) } func (e *Editor) setBgColor(color color) { s := fmt.Sprintf("\033[%dm", color) e.write([]byte(s)) } func (e *Editor) moveCursor(row, col int) { s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin e.write([]byte(s)) } func (e *Editor) updateRowRunes(row *Row) { if e.crow < e.terminal.height { e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes()) e.writeRow(row) } } func (e *Editor) refreshAllRows() { for i := 0; i < e.terminal.height; i += 1 { e.crow = i e.writeRow(e.rows[e.scroolrow+i]) } } func (e *Editor) setRowPos(row int) { if row >= e.n { row = e.n - 1 } if row < 0 { if e.scroolrow > 0 { e.scroolrow -= 1 e.refreshAllRows() } row = 0 } if row >= e.terminal.height { if row+e.scroolrow <= e.n { e.scroolrow += 1 } row = e.terminal.height - 1 e.refreshAllRows() } e.crow = row e.moveCursor(row, e.ccol) } func (e *Editor) setColPos(col int) { if col < 0 { col = 0 } if col >= e.currentRow().visibleLen() { col = e.currentRow().visibleLen() } if col >= e.terminal.width { col = e.terminal.width - 1 } e.ccol = col e.moveCursor(e.crow, e.ccol) } func (e *Editor) setRowCol(row int, col int) { if row > e.n && col > e.currentRow().visibleLen() { return } e.setRowPos(row) e.setColPos(col) } // Models func (r *Row) deleteAt(col int) { if col >= r.len() { return } r.chars.DeleteAt(col) } func (r *Row) insertAt(colPos int, newRune rune) { if colPos > r.len() { colPos = r.len() } r.chars.InsertAt(colPos, newRune) } func (r *Row) len() int { return r.chars.Len() } func (r *Row) visibleLen() int { return r.chars.VisibleLen() } func (e *Editor) currentRow() *Row { return e.rows[e.crow + e.scroolrow] } func (e *Editor) deleteRune(row *Row, col int) { row.deleteAt(col) e.updateRowRunes(row) e.setRowCol(e.crow, e.ccol - 1) } func (e *Editor) insertRune(row *Row, col int, newRune rune) { row.insertAt(col, newRune) e.updateRowRunes(row) } func (e *Editor) deleteRow(row int) { e.rows = append(e.rows[:row], e.rows[row+1:]...) e.n -= 1 prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) replaceRune(row int, newRune []rune) { gt := NewGapTable(128) for _, r := range newRune { gt.AppendRune(r) } r := &Row{ chars: gt, } e.rows[row] = r prevRowPos := e.crow e.crow = row - e.scroolrow e.updateRowRunes(r) e.crow = prevRowPos } func (e *Editor) insertRow(row int, runes []rune) { gt := NewGapTable(128) for _, r := range runes { gt.AppendRune(r) } r := &Row{ chars: gt, } // https://github.com/golang/go/wiki/SliceTricks e.rows = append(e.rows[:row], append([]*Row{ r }, e.rows[row:]...)...) e.n += 1 e.reallocBufferIfNeeded() prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) reallocBufferIfNeeded() { if e.n == len(e.rows) { newCap := cap(e.rows) * 2 newRows := make([]*Row, newCap) copy(newRows, e.rows) e.rows = newRows e.debugPrint("DEBUG: realloc occurred") } } func (e *Editor) numberOfRunesInRow() int { return e.currentRow().chars.Len() } func (e *Editor) backspace() { row := e.currentRow() if e.ccol == 0 { if e.crow + e.scroolrow > 0 { prevRowPos := e.crow + e.scroolrow - 1 prevRow := e.rows[prevRowPos] // Update the previous row. newRunes := append([]rune{}, prevRow.chars.Runes()[:prevRow.len()-1]...) newRunes = append(newRunes, row.chars.Runes()...) 
e.replaceRune(prevRowPos, newRunes) // Delete the current row currentRowPos := e.crow + e.scroolrow e.deleteRow(currentRowPos) e.setRowCol(e.crow - 1, prevRow.len() - 1) } } else { e.deleteRune(row, e.ccol - 1) } e.debugRowRunes() } func (e *Editor) back() { if e.ccol == 0 { if e.crow > 0 { e.setRowCol(e.crow-1, e.rows[e.crow+e.scroolrow-1].visibleLen()) } } else { e.setRowCol(e.crow, e.ccol-1) } } func (e *Editor) next() { if e.ccol >= e.currentRow().visibleLen() { if e.crow+1 < e.n { e.setRowCol(e.crow+1, 0) } } else { e.setRowCol(e.crow, e.ccol+1) } } func (e *Editor) newLine() { // Insert the new row. currentLineRowPos := e.crow + e.scroolrow currentLineRow := e.rows[currentLineRowPos] newLineRowPos := e.crow + e.scroolrow + 1 nextRowRunes := append([]rune{}, currentLineRow.chars.Runes()[e.ccol:]...) e.insertRow(newLineRowPos, nextRowRunes) // Update the current row. currentRowNewRunes := append([]rune{}, currentLineRow.chars.Runes()[:e.ccol]...) currentRowNewRunes = append(currentRowNewRunes, '\n') e.replaceRune(e.crow + e.scroolrow, currentRowNewRunes) e.setRowCol(e.crow + 1, 0) e.debugRowRunes() } func existsFile(filename string) bool { _, err := os.Stat(filename) return err == nil } func saveFile(filePath string, rows []*Row) { sb := strings.Builder{} for _, r := range rows { if r.len() >= 1 { for _, ch := range r.chars.Runes() { sb.WriteRune(ch) } } } _ = ioutil.WriteFile(filePath, []byte(sb.String()), 0644) } func loadFile(filePath string) *Editor { e := &Editor{ crow: 0, ccol: 0, scroolrow: 0, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), n: 1, } rows := makeRows() bytes, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } gt := NewGapTable(128) for _, b := range bytes
rows[e.n-1] = &Row{chars: gt} e.rows = rows return e } func (e *Editor) exit() { e.restoreTerminal(0) } func (e *Editor) parseKey(b []byte) (rune, int) { // Try parsing escape sequence if len(b) == 3 { if b[0] == byte(27) && b[1] == '[' { switch b[2] { case 'A': return ArrowUp, 3 case 'B': return ArrowDown, 3 case 'C': return ArrowRight, 3 case 'D': return ArrowLeft, 3 default: return DummyKey, 0 } } } // parse bytes as UTF-8. return utf8.DecodeRune(b) } func (e *Editor) readKeys() { buf := make([]byte, 64) for { if n, err := syscall.Read(0, buf); err == nil { b := buf[:n] for { r, n := e.parseKey(b) if n == 0 { break } e.keyChan <- r b = b[n:] } } } } func (e *Editor) interpretKey() { for { r := <-e.keyChan switch r { case ControlA: e.setRowCol(e.crow, 0) case ControlB, ArrowLeft: e.back() case ControlC: e.exit() return case ControlE: e.setRowCol(e.crow, e.numberOfRunesInRow()) case ControlF, ArrowRight: e.next() case ControlH, BackSpace: e.backspace() case ControlN, ArrowDown: e.setRowCol(e.crow+1, e.ccol) case Tab: for i := 0; i < 4; i += 1 { e.insertRune(e.currentRow(), e.ccol, rune(' ')) } e.setColPos(e.ccol + 4) case Enter: e.newLine() case ControlS: saveFile(e.filePath, e.rows) e.writeHelpMenu("Saved!") e.timeChan <- resetMessage case ControlP, ArrowUp: e.setRowCol(e.crow-1, e.ccol) // for debug case ControlV: e.debugDetailPrint(e) default: e.insertRune(e.currentRow(), e.ccol, r) e.setColPos(e.ccol + 1) } } } func (e *Editor) pollTimerEvent() { for { switch <-e.timeChan { case resetMessage: t := time.NewTimer(2 * time.Second) <-t.C e.writeHelpMenu(helpMessage) } } } func newTerminal(fd int) *Terminal { termios := makeRaw(fd) width, height := getWindowSize(fd) terminal := &Terminal{ termios: termios, width: width, height: height - 2, // for status, message bar } return terminal } func makeRows() []*Row { var rows = make([]*Row, 1024) // not good for i := range rows { rows[i] = &Row{ chars: NewGapTable(128), } } return rows } func newEditor(filePath string, debug bool) *Editor { terminal := newTerminal(0) if existsFile(filePath) { e := loadFile(filePath) e.debug = debug e.terminal = terminal return e } rows := makeRows() return &Editor{ crow: 0, ccol: 0, scroolrow: 0, rows: rows, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), terminal: terminal, n: 1, debug: debug, } } func run(filePath string, debug bool) { e := newEditor(filePath, debug) e.initTerminal() e.refreshAllRows() e.setRowCol(0, 0) go e.readKeys() go e.pollTimerEvent() e.interpretKey() } func main() { flag.Parse() if flag.NArg() < 1 || flag.NArg() > 3 { fmt.Println("Usage: mille <filename> [--debug]") return } debug := flag.NArg() == 2 && flag.Arg(1) == "--debug" run(flag.Arg(0), debug) }
{ // Treat TAB as 4 spaces. if b == Tab { gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) continue } // ASCII-only gt.AppendRune(rune(b)) if b == '\n' { rows[e.n-1] = &Row{chars: gt} e.n += 1 gt = NewGapTable(128) } }
conditional_block
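The middle of this row is loadFile's byte loop: each TAB is expanded to four spaces, bytes are treated as ASCII runes, and a new row starts after every '\n', with the newline kept at the end of the row it terminates. The same splitting rule, sketched standalone with plain rune slices in place of the project's GapTable:

package main

import "fmt"

// splitRows applies the same loading rule: TAB becomes four spaces, bytes
// are taken as ASCII runes, and a row ends right after each '\n' (the
// newline stays in the row it terminates).
func splitRows(data []byte) [][]rune {
	rows := [][]rune{}
	cur := []rune{}
	for _, b := range data {
		if b == '\t' {
			cur = append(cur, ' ', ' ', ' ', ' ')
			continue
		}
		cur = append(cur, rune(b))
		if b == '\n' {
			rows = append(rows, cur)
			cur = []rune{}
		}
	}
	rows = append(rows, cur) // final row without a trailing newline
	return rows
}

func main() {
	for i, r := range splitRows([]byte("package main\n\tfmt.Println(1)\n")) {
		fmt.Printf("%d: %q\n", i, string(r))
	}
}

As in the editor, a file that ends with a newline still produces one last, empty row.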
main.go
package main import ( "flag" "fmt" "golang.org/x/sys/unix" "io/ioutil" "os" "path/filepath" "strings" "syscall" "time" "unicode/utf8" ) // Key Definitions const ( DummyKey = -1 ControlA = 1 ControlB = 2 ControlC = 3 ControlE = 5 ControlF = 6 ControlH = 8 Tab = 9 Enter = 13 ControlN = 14 ControlP = 16 ControlS = 19 ControlV = 22 BackSpace = 127 ArrowUp = 1000 ArrowDown = 1001 ArrowRight = 1002 ArrowLeft = 1003 ) // Color Definition type color int const ( DummyColor color = 37 FgGreen = 32 FgCyan = 36 BgBlack = 40 BgCyan = 46 ) const ( helpMessage = "HELP: Ctrl+S = Save / Ctrl+C = Quit" ) type messageType int const ( resetMessage messageType = iota + 1 ) type Keyword string const ( Break Keyword = "break" Default = "default" Func = "func" Interface = "interface" Select = "select" Case = "case" Defer = "defer" Go = "go" Map = "map" Struct = "struct" Chan = "chan" Else = "else" Goto = "goto" Package = "package" Switch = "switch" Const = "const" Fallthrough = "fallthrough" If = "if" Range = "range" Type = "type" Continue = "continue" For = "for" Import = "import" Return = "return" Var = "var" ) var keywordColor = map[Keyword]color{ Break: FgCyan, Default: FgCyan, Interface: FgCyan, Select: FgCyan, Case: FgCyan, Defer: FgCyan, Go: FgCyan, Map: FgCyan, Struct: FgCyan, Chan: FgCyan, Else: FgCyan, Goto: FgCyan, Switch: FgCyan, Const: FgCyan, Fallthrough: FgCyan, Return: FgCyan, Range: FgCyan, Type: FgCyan, Continue: FgCyan, For: FgCyan, If: FgCyan, Package: FgCyan, Import: FgCyan, Func: FgCyan, Var: FgCyan, } type Editor struct { filePath string keyChan chan rune timeChan chan messageType crow int ccol int scroolrow int rows []*Row terminal *Terminal n int // numberOfRows debug bool // for debug } type Terminal struct { termios *unix.Termios width int height int } type Row struct { chars *GapTable // render } func (e *Editor) debugPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintln(os.Stderr, a...) } } func (e *Editor) debugDetailPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintf(os.Stderr, "%+v\n", a...) 
} } func (e *Editor) debugRowRunes() { if e.debug { i := 0 for i < e.n { _, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes()) i += 1 } } } // Terminal func makeRaw(fd int) *unix.Termios { termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA) if err != nil { panic(err) } termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON termios.Oflag &^= unix.OPOST termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN termios.Cflag &^= unix.CSIZE | unix.PARENB termios.Cflag |= unix.CS8 termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil { panic(err) } return termios } func (e *Editor) restoreTerminal(fd int) { if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil { panic(err) } } func getWindowSize(fd int) (int, int) { ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) if err != nil { panic(err) } return int(ws.Col), int(ws.Row) } func (e *Editor) initTerminal() { e.flush() e.writeHelpMenu(helpMessage) e.writeStatusBar() e.moveCursor(e.crow, e.ccol) } func (e *Editor) writeHelpMenu(message string) { prevRow, prevCol := e.crow, e.ccol for i, ch := range message { e.moveCursor(e.terminal.height+1, i) e.write([]byte(string(ch))) } for i := len(message); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height+1, i) e.write([]byte{' '}) } e.moveCursor(prevRow, prevCol) } func (e *Editor) writeStatusBar() { e.setBgColor(BgCyan) defer e.setBgColor(BgBlack) // Write file name for i, ch := range e.filePath { e.moveCursor(e.terminal.height, i) e.write([]byte(string(ch))) } // Write Spacer for i := len(e.filePath); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height, i) e.write([]byte{' '}) } } // Views func (e *Editor) write(b []byte) { syscall.Write(0, b) } func (e *Editor) writeWithColor(b []byte, colors []color) { var newBuf []byte for i, c := range colors { s := fmt.Sprintf("\033[%dm", c) newBuf = append(newBuf, []byte(s)...) newBuf = append(newBuf, b[i]) } syscall.Write(0, newBuf) } func (e *Editor) highlight(b []byte) []color { colors := make([]color, len(b)) for i := range colors { colors[i] = DummyColor } // ASCII-only ascii := string(b) // Keywords for key := range keywordColor { index := strings.Index(ascii, string(key)) if index != -1 { for i := 0; i < len(string(key)); i += 1 { colors[index+i] = keywordColor[key] } } } // String Literal isStringLit := false for i, b := range ascii { if b == '"' || isStringLit { if b == '"' { isStringLit = !isStringLit } colors[i] = FgGreen } } return colors } func (e *Editor) writeRow(r *Row) { var buf []byte for _, r := range r.chars.Runes() { buf = append(buf, []byte(string(r))...) } e.moveCursor(e.crow, 0) e.flushRow() // If the extension of fileName is .go, write with highlights. if filepath.Ext(e.filePath) == ".go" { colors := e.highlight(buf) e.writeWithColor(buf, colors) } else { e.write(buf) } } func (e *Editor) flush() { e.write([]byte("\033[2J")) } func (e *Editor) flushRow() { e.write([]byte("\033[2K")) } func (e *Editor) setBgColor(color color) { s := fmt.Sprintf("\033[%dm", color) e.write([]byte(s)) } func (e *Editor)
(row, col int) { s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin e.write([]byte(s)) } func (e *Editor) updateRowRunes(row *Row) { if e.crow < e.terminal.height { e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes()) e.writeRow(row) } } func (e *Editor) refreshAllRows() { for i := 0; i < e.terminal.height; i += 1 { e.crow = i e.writeRow(e.rows[e.scroolrow+i]) } } func (e *Editor) setRowPos(row int) { if row >= e.n { row = e.n - 1 } if row < 0 { if e.scroolrow > 0 { e.scroolrow -= 1 e.refreshAllRows() } row = 0 } if row >= e.terminal.height { if row+e.scroolrow <= e.n { e.scroolrow += 1 } row = e.terminal.height - 1 e.refreshAllRows() } e.crow = row e.moveCursor(row, e.ccol) } func (e *Editor) setColPos(col int) { if col < 0 { col = 0 } if col >= e.currentRow().visibleLen() { col = e.currentRow().visibleLen() } if col >= e.terminal.width { col = e.terminal.width - 1 } e.ccol = col e.moveCursor(e.crow, e.ccol) } func (e *Editor) setRowCol(row int, col int) { if row > e.n && col > e.currentRow().visibleLen() { return } e.setRowPos(row) e.setColPos(col) } // Models func (r *Row) deleteAt(col int) { if col >= r.len() { return } r.chars.DeleteAt(col) } func (r *Row) insertAt(colPos int, newRune rune) { if colPos > r.len() { colPos = r.len() } r.chars.InsertAt(colPos, newRune) } func (r *Row) len() int { return r.chars.Len() } func (r *Row) visibleLen() int { return r.chars.VisibleLen() } func (e *Editor) currentRow() *Row { return e.rows[e.crow + e.scroolrow] } func (e *Editor) deleteRune(row *Row, col int) { row.deleteAt(col) e.updateRowRunes(row) e.setRowCol(e.crow, e.ccol - 1) } func (e *Editor) insertRune(row *Row, col int, newRune rune) { row.insertAt(col, newRune) e.updateRowRunes(row) } func (e *Editor) deleteRow(row int) { e.rows = append(e.rows[:row], e.rows[row+1:]...) e.n -= 1 prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) replaceRune(row int, newRune []rune) { gt := NewGapTable(128) for _, r := range newRune { gt.AppendRune(r) } r := &Row{ chars: gt, } e.rows[row] = r prevRowPos := e.crow e.crow = row - e.scroolrow e.updateRowRunes(r) e.crow = prevRowPos } func (e *Editor) insertRow(row int, runes []rune) { gt := NewGapTable(128) for _, r := range runes { gt.AppendRune(r) } r := &Row{ chars: gt, } // https://github.com/golang/go/wiki/SliceTricks e.rows = append(e.rows[:row], append([]*Row{ r }, e.rows[row:]...)...) e.n += 1 e.reallocBufferIfNeeded() prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) reallocBufferIfNeeded() { if e.n == len(e.rows) { newCap := cap(e.rows) * 2 newRows := make([]*Row, newCap) copy(newRows, e.rows) e.rows = newRows e.debugPrint("DEBUG: realloc occurred") } } func (e *Editor) numberOfRunesInRow() int { return e.currentRow().chars.Len() } func (e *Editor) backspace() { row := e.currentRow() if e.ccol == 0 { if e.crow + e.scroolrow > 0 { prevRowPos := e.crow + e.scroolrow - 1 prevRow := e.rows[prevRowPos] // Update the previous row. newRunes := append([]rune{}, prevRow.chars.Runes()[:prevRow.len()-1]...) newRunes = append(newRunes, row.chars.Runes()...) 
e.replaceRune(prevRowPos, newRunes) // Delete the current row currentRowPos := e.crow + e.scroolrow e.deleteRow(currentRowPos) e.setRowCol(e.crow - 1, prevRow.len() - 1) } } else { e.deleteRune(row, e.ccol - 1) } e.debugRowRunes() } func (e *Editor) back() { if e.ccol == 0 { if e.crow > 0 { e.setRowCol(e.crow-1, e.rows[e.crow+e.scroolrow-1].visibleLen()) } } else { e.setRowCol(e.crow, e.ccol-1) } } func (e *Editor) next() { if e.ccol >= e.currentRow().visibleLen() { if e.crow+1 < e.n { e.setRowCol(e.crow+1, 0) } } else { e.setRowCol(e.crow, e.ccol+1) } } func (e *Editor) newLine() { // Insert the new row. currentLineRowPos := e.crow + e.scroolrow currentLineRow := e.rows[currentLineRowPos] newLineRowPos := e.crow + e.scroolrow + 1 nextRowRunes := append([]rune{}, currentLineRow.chars.Runes()[e.ccol:]...) e.insertRow(newLineRowPos, nextRowRunes) // Update the current row. currentRowNewRunes := append([]rune{}, currentLineRow.chars.Runes()[:e.ccol]...) currentRowNewRunes = append(currentRowNewRunes, '\n') e.replaceRune(e.crow + e.scroolrow, currentRowNewRunes) e.setRowCol(e.crow + 1, 0) e.debugRowRunes() } func existsFile(filename string) bool { _, err := os.Stat(filename) return err == nil } func saveFile(filePath string, rows []*Row) { sb := strings.Builder{} for _, r := range rows { if r.len() >= 1 { for _, ch := range r.chars.Runes() { sb.WriteRune(ch) } } } _ = ioutil.WriteFile(filePath, []byte(sb.String()), 0644) } func loadFile(filePath string) *Editor { e := &Editor{ crow: 0, ccol: 0, scroolrow: 0, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), n: 1, } rows := makeRows() bytes, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } gt := NewGapTable(128) for _, b := range bytes { // Treat TAB as 4 spaces. if b == Tab { gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) continue } // ASCII-only gt.AppendRune(rune(b)) if b == '\n' { rows[e.n-1] = &Row{chars: gt} e.n += 1 gt = NewGapTable(128) } } rows[e.n-1] = &Row{chars: gt} e.rows = rows return e } func (e *Editor) exit() { e.restoreTerminal(0) } func (e *Editor) parseKey(b []byte) (rune, int) { // Try parsing escape sequence if len(b) == 3 { if b[0] == byte(27) && b[1] == '[' { switch b[2] { case 'A': return ArrowUp, 3 case 'B': return ArrowDown, 3 case 'C': return ArrowRight, 3 case 'D': return ArrowLeft, 3 default: return DummyKey, 0 } } } // parse bytes as UTF-8. 
return utf8.DecodeRune(b) } func (e *Editor) readKeys() { buf := make([]byte, 64) for { if n, err := syscall.Read(0, buf); err == nil { b := buf[:n] for { r, n := e.parseKey(b) if n == 0 { break } e.keyChan <- r b = b[n:] } } } } func (e *Editor) interpretKey() { for { r := <-e.keyChan switch r { case ControlA: e.setRowCol(e.crow, 0) case ControlB, ArrowLeft: e.back() case ControlC: e.exit() return case ControlE: e.setRowCol(e.crow, e.numberOfRunesInRow()) case ControlF, ArrowRight: e.next() case ControlH, BackSpace: e.backspace() case ControlN, ArrowDown: e.setRowCol(e.crow+1, e.ccol) case Tab: for i := 0; i < 4; i += 1 { e.insertRune(e.currentRow(), e.ccol, rune(' ')) } e.setColPos(e.ccol + 4) case Enter: e.newLine() case ControlS: saveFile(e.filePath, e.rows) e.writeHelpMenu("Saved!") e.timeChan <- resetMessage case ControlP, ArrowUp: e.setRowCol(e.crow-1, e.ccol) // for debug case ControlV: e.debugDetailPrint(e) default: e.insertRune(e.currentRow(), e.ccol, r) e.setColPos(e.ccol + 1) } } } func (e *Editor) pollTimerEvent() { for { switch <-e.timeChan { case resetMessage: t := time.NewTimer(2 * time.Second) <-t.C e.writeHelpMenu(helpMessage) } } } func newTerminal(fd int) *Terminal { termios := makeRaw(fd) width, height := getWindowSize(fd) terminal := &Terminal{ termios: termios, width: width, height: height - 2, // for status, message bar } return terminal } func makeRows() []*Row { var rows = make([]*Row, 1024) // not good for i := range rows { rows[i] = &Row{ chars: NewGapTable(128), } } return rows } func newEditor(filePath string, debug bool) *Editor { terminal := newTerminal(0) if existsFile(filePath) { e := loadFile(filePath) e.debug = debug e.terminal = terminal return e } rows := makeRows() return &Editor{ crow: 0, ccol: 0, scroolrow: 0, rows: rows, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), terminal: terminal, n: 1, debug: debug, } } func run(filePath string, debug bool) { e := newEditor(filePath, debug) e.initTerminal() e.refreshAllRows() e.setRowCol(0, 0) go e.readKeys() go e.pollTimerEvent() e.interpretKey() } func main() { flag.Parse() if flag.NArg() < 1 || flag.NArg() > 3 { fmt.Println("Usage: mille <filename> [--debug]") return } debug := flag.NArg() == 2 && flag.Arg(1) == "--debug" run(flag.Arg(0), debug) }
moveCursor
identifier_name
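This row's surrounding context includes the key decoder parseKey: an exact three-byte ESC [ A–D sequence maps to a synthetic arrow-key rune, and anything else is decoded as UTF-8. A minimal standalone version of that decision (constants are local here; the editor's are package-level):

package main

import (
	"fmt"
	"unicode/utf8"
)

const (
	arrowUp    rune = 1000
	arrowDown  rune = 1001
	arrowRight rune = 1002
	arrowLeft  rune = 1003
	dummyKey   rune = -1
)

// parseKey returns the decoded key and how many input bytes it consumed:
// an exact three-byte "ESC [ X" sequence becomes a synthetic arrow rune,
// everything else falls through to plain UTF-8 decoding.
func parseKey(b []byte) (rune, int) {
	if len(b) == 3 && b[0] == 0x1b && b[1] == '[' {
		switch b[2] {
		case 'A':
			return arrowUp, 3
		case 'B':
			return arrowDown, 3
		case 'C':
			return arrowRight, 3
		case 'D':
			return arrowLeft, 3
		default:
			return dummyKey, 0
		}
	}
	return utf8.DecodeRune(b)
}

func main() {
	r, n := parseKey([]byte{0x1b, '[', 'C'})
	fmt.Println(r == arrowRight, n) // true 3
	r, n = parseKey([]byte("é"))
	fmt.Println(string(r), n) // é 2
}

The synthetic arrow runes sit well outside the ASCII range, so the interpretKey switch can treat them exactly like ordinary key runes.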
main.go
package main import ( "flag" "fmt" "golang.org/x/sys/unix" "io/ioutil" "os" "path/filepath" "strings" "syscall" "time" "unicode/utf8" ) // Key Definitions const ( DummyKey = -1 ControlA = 1 ControlB = 2 ControlC = 3 ControlE = 5 ControlF = 6 ControlH = 8 Tab = 9 Enter = 13 ControlN = 14 ControlP = 16 ControlS = 19 ControlV = 22 BackSpace = 127 ArrowUp = 1000 ArrowDown = 1001 ArrowRight = 1002 ArrowLeft = 1003 ) // Color Definition type color int const ( DummyColor color = 37 FgGreen = 32 FgCyan = 36 BgBlack = 40 BgCyan = 46 ) const ( helpMessage = "HELP: Ctrl+S = Save / Ctrl+C = Quit" ) type messageType int const ( resetMessage messageType = iota + 1 ) type Keyword string const ( Break Keyword = "break" Default = "default" Func = "func" Interface = "interface" Select = "select" Case = "case" Defer = "defer" Go = "go" Map = "map" Struct = "struct" Chan = "chan" Else = "else" Goto = "goto" Package = "package" Switch = "switch" Const = "const" Fallthrough = "fallthrough" If = "if" Range = "range" Type = "type" Continue = "continue" For = "for" Import = "import" Return = "return" Var = "var" ) var keywordColor = map[Keyword]color{ Break: FgCyan, Default: FgCyan, Interface: FgCyan, Select: FgCyan, Case: FgCyan, Defer: FgCyan, Go: FgCyan, Map: FgCyan, Struct: FgCyan, Chan: FgCyan, Else: FgCyan, Goto: FgCyan, Switch: FgCyan, Const: FgCyan, Fallthrough: FgCyan, Return: FgCyan, Range: FgCyan, Type: FgCyan, Continue: FgCyan, For: FgCyan, If: FgCyan, Package: FgCyan, Import: FgCyan, Func: FgCyan, Var: FgCyan, } type Editor struct { filePath string keyChan chan rune timeChan chan messageType crow int ccol int scroolrow int rows []*Row terminal *Terminal n int // numberOfRows debug bool // for debug } type Terminal struct { termios *unix.Termios width int height int } type Row struct { chars *GapTable // render } func (e *Editor) debugPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintln(os.Stderr, a...) } } func (e *Editor) debugDetailPrint(a ...interface{}) { if e.debug { _, _ = fmt.Fprintf(os.Stderr, "%+v\n", a...) 
} } func (e *Editor) debugRowRunes() { if e.debug { i := 0 for i < e.n { _, _ = fmt.Fprintln(os.Stderr, i, ":", e.rows[i].chars.Runes()) i += 1 } } } // Terminal func makeRaw(fd int) *unix.Termios { termios, err := unix.IoctlGetTermios(fd, unix.TIOCGETA) if err != nil { panic(err) } termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON termios.Oflag &^= unix.OPOST termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN termios.Cflag &^= unix.CSIZE | unix.PARENB termios.Cflag |= unix.CS8 termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, termios); err != nil { panic(err) } return termios } func (e *Editor) restoreTerminal(fd int) { if err := unix.IoctlSetTermios(fd, unix.TIOCSETA, e.terminal.termios); err != nil { panic(err) } } func getWindowSize(fd int) (int, int) { ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) if err != nil { panic(err) } return int(ws.Col), int(ws.Row) } func (e *Editor) initTerminal() { e.flush() e.writeHelpMenu(helpMessage) e.writeStatusBar() e.moveCursor(e.crow, e.ccol) } func (e *Editor) writeHelpMenu(message string) { prevRow, prevCol := e.crow, e.ccol for i, ch := range message { e.moveCursor(e.terminal.height+1, i) e.write([]byte(string(ch))) } for i := len(message); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height+1, i) e.write([]byte{' '}) } e.moveCursor(prevRow, prevCol) } func (e *Editor) writeStatusBar() { e.setBgColor(BgCyan) defer e.setBgColor(BgBlack) // Write file name for i, ch := range e.filePath { e.moveCursor(e.terminal.height, i) e.write([]byte(string(ch))) } // Write Spacer for i := len(e.filePath); i < e.terminal.width; i++ { e.moveCursor(e.terminal.height, i) e.write([]byte{' '}) } } // Views func (e *Editor) write(b []byte) { syscall.Write(0, b) } func (e *Editor) writeWithColor(b []byte, colors []color) { var newBuf []byte for i, c := range colors { s := fmt.Sprintf("\033[%dm", c) newBuf = append(newBuf, []byte(s)...) newBuf = append(newBuf, b[i]) } syscall.Write(0, newBuf) } func (e *Editor) highlight(b []byte) []color { colors := make([]color, len(b)) for i := range colors { colors[i] = DummyColor } // ASCII-only ascii := string(b) // Keywords for key := range keywordColor { index := strings.Index(ascii, string(key)) if index != -1 { for i := 0; i < len(string(key)); i += 1 { colors[index+i] = keywordColor[key] } } } // String Literal isStringLit := false for i, b := range ascii { if b == '"' || isStringLit { if b == '"' { isStringLit = !isStringLit } colors[i] = FgGreen } } return colors } func (e *Editor) writeRow(r *Row) { var buf []byte for _, r := range r.chars.Runes() { buf = append(buf, []byte(string(r))...) } e.moveCursor(e.crow, 0) e.flushRow() // If the extension of fileName is .go, write with highlights. 
if filepath.Ext(e.filePath) == ".go" { colors := e.highlight(buf) e.writeWithColor(buf, colors) } else { e.write(buf) } } func (e *Editor) flush() { e.write([]byte("\033[2J")) } func (e *Editor) flushRow() { e.write([]byte("\033[2K")) } func (e *Editor) setBgColor(color color) { s := fmt.Sprintf("\033[%dm", color) e.write([]byte(s)) } func (e *Editor) moveCursor(row, col int) { s := fmt.Sprintf("\033[%d;%dH", row+1, col+1) // 0-origin to 1-origin e.write([]byte(s)) } func (e *Editor) updateRowRunes(row *Row) { if e.crow < e.terminal.height { e.debugPrint("DEBUG: row's view updated at", e.crow + e.scroolrow, "for", row.chars.Runes()) e.writeRow(row) } } func (e *Editor) refreshAllRows() { for i := 0; i < e.terminal.height; i += 1 { e.crow = i e.writeRow(e.rows[e.scroolrow+i]) } } func (e *Editor) setRowPos(row int) { if row >= e.n { row = e.n - 1 } if row < 0 { if e.scroolrow > 0 { e.scroolrow -= 1 e.refreshAllRows() } row = 0 } if row >= e.terminal.height { if row+e.scroolrow <= e.n { e.scroolrow += 1 } row = e.terminal.height - 1 e.refreshAllRows() } e.crow = row e.moveCursor(row, e.ccol) } func (e *Editor) setColPos(col int) { if col < 0 { col = 0 } if col >= e.currentRow().visibleLen() { col = e.currentRow().visibleLen() } if col >= e.terminal.width { col = e.terminal.width - 1 } e.ccol = col e.moveCursor(e.crow, e.ccol) } func (e *Editor) setRowCol(row int, col int) { if row > e.n && col > e.currentRow().visibleLen() { return } e.setRowPos(row) e.setColPos(col) } // Models func (r *Row) deleteAt(col int)
func (r *Row) insertAt(colPos int, newRune rune) { if colPos > r.len() { colPos = r.len() } r.chars.InsertAt(colPos, newRune) } func (r *Row) len() int { return r.chars.Len() } func (r *Row) visibleLen() int { return r.chars.VisibleLen() } func (e *Editor) currentRow() *Row { return e.rows[e.crow + e.scroolrow] } func (e *Editor) deleteRune(row *Row, col int) { row.deleteAt(col) e.updateRowRunes(row) e.setRowCol(e.crow, e.ccol - 1) } func (e *Editor) insertRune(row *Row, col int, newRune rune) { row.insertAt(col, newRune) e.updateRowRunes(row) } func (e *Editor) deleteRow(row int) { e.rows = append(e.rows[:row], e.rows[row+1:]...) e.n -= 1 prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) replaceRune(row int, newRune []rune) { gt := NewGapTable(128) for _, r := range newRune { gt.AppendRune(r) } r := &Row{ chars: gt, } e.rows[row] = r prevRowPos := e.crow e.crow = row - e.scroolrow e.updateRowRunes(r) e.crow = prevRowPos } func (e *Editor) insertRow(row int, runes []rune) { gt := NewGapTable(128) for _, r := range runes { gt.AppendRune(r) } r := &Row{ chars: gt, } // https://github.com/golang/go/wiki/SliceTricks e.rows = append(e.rows[:row], append([]*Row{ r }, e.rows[row:]...)...) e.n += 1 e.reallocBufferIfNeeded() prevRowPos := e.crow e.refreshAllRows() e.crow = prevRowPos } func (e *Editor) reallocBufferIfNeeded() { if e.n == len(e.rows) { newCap := cap(e.rows) * 2 newRows := make([]*Row, newCap) copy(newRows, e.rows) e.rows = newRows e.debugPrint("DEBUG: realloc occurred") } } func (e *Editor) numberOfRunesInRow() int { return e.currentRow().chars.Len() } func (e *Editor) backspace() { row := e.currentRow() if e.ccol == 0 { if e.crow + e.scroolrow > 0 { prevRowPos := e.crow + e.scroolrow - 1 prevRow := e.rows[prevRowPos] // Update the previous row. newRunes := append([]rune{}, prevRow.chars.Runes()[:prevRow.len()-1]...) newRunes = append(newRunes, row.chars.Runes()...) e.replaceRune(prevRowPos, newRunes) // Delete the current row currentRowPos := e.crow + e.scroolrow e.deleteRow(currentRowPos) e.setRowCol(e.crow - 1, prevRow.len() - 1) } } else { e.deleteRune(row, e.ccol - 1) } e.debugRowRunes() } func (e *Editor) back() { if e.ccol == 0 { if e.crow > 0 { e.setRowCol(e.crow-1, e.rows[e.crow+e.scroolrow-1].visibleLen()) } } else { e.setRowCol(e.crow, e.ccol-1) } } func (e *Editor) next() { if e.ccol >= e.currentRow().visibleLen() { if e.crow+1 < e.n { e.setRowCol(e.crow+1, 0) } } else { e.setRowCol(e.crow, e.ccol+1) } } func (e *Editor) newLine() { // Insert the new row. currentLineRowPos := e.crow + e.scroolrow currentLineRow := e.rows[currentLineRowPos] newLineRowPos := e.crow + e.scroolrow + 1 nextRowRunes := append([]rune{}, currentLineRow.chars.Runes()[e.ccol:]...) e.insertRow(newLineRowPos, nextRowRunes) // Update the current row. currentRowNewRunes := append([]rune{}, currentLineRow.chars.Runes()[:e.ccol]...) 
currentRowNewRunes = append(currentRowNewRunes, '\n') e.replaceRune(e.crow + e.scroolrow, currentRowNewRunes) e.setRowCol(e.crow + 1, 0) e.debugRowRunes() } func existsFile(filename string) bool { _, err := os.Stat(filename) return err == nil } func saveFile(filePath string, rows []*Row) { sb := strings.Builder{} for _, r := range rows { if r.len() >= 1 { for _, ch := range r.chars.Runes() { sb.WriteRune(ch) } } } _ = ioutil.WriteFile(filePath, []byte(sb.String()), 0644) } func loadFile(filePath string) *Editor { e := &Editor{ crow: 0, ccol: 0, scroolrow: 0, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), n: 1, } rows := makeRows() bytes, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } gt := NewGapTable(128) for _, b := range bytes { // Treat TAB as 4 spaces. if b == Tab { gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) gt.AppendRune(rune(0x20)) continue } // ASCII-only gt.AppendRune(rune(b)) if b == '\n' { rows[e.n-1] = &Row{chars: gt} e.n += 1 gt = NewGapTable(128) } } rows[e.n-1] = &Row{chars: gt} e.rows = rows return e } func (e *Editor) exit() { e.restoreTerminal(0) } func (e *Editor) parseKey(b []byte) (rune, int) { // Try parsing escape sequence if len(b) == 3 { if b[0] == byte(27) && b[1] == '[' { switch b[2] { case 'A': return ArrowUp, 3 case 'B': return ArrowDown, 3 case 'C': return ArrowRight, 3 case 'D': return ArrowLeft, 3 default: return DummyKey, 0 } } } // parse bytes as UTF-8. return utf8.DecodeRune(b) } func (e *Editor) readKeys() { buf := make([]byte, 64) for { if n, err := syscall.Read(0, buf); err == nil { b := buf[:n] for { r, n := e.parseKey(b) if n == 0 { break } e.keyChan <- r b = b[n:] } } } } func (e *Editor) interpretKey() { for { r := <-e.keyChan switch r { case ControlA: e.setRowCol(e.crow, 0) case ControlB, ArrowLeft: e.back() case ControlC: e.exit() return case ControlE: e.setRowCol(e.crow, e.numberOfRunesInRow()) case ControlF, ArrowRight: e.next() case ControlH, BackSpace: e.backspace() case ControlN, ArrowDown: e.setRowCol(e.crow+1, e.ccol) case Tab: for i := 0; i < 4; i += 1 { e.insertRune(e.currentRow(), e.ccol, rune(' ')) } e.setColPos(e.ccol + 4) case Enter: e.newLine() case ControlS: saveFile(e.filePath, e.rows) e.writeHelpMenu("Saved!") e.timeChan <- resetMessage case ControlP, ArrowUp: e.setRowCol(e.crow-1, e.ccol) // for debug case ControlV: e.debugDetailPrint(e) default: e.insertRune(e.currentRow(), e.ccol, r) e.setColPos(e.ccol + 1) } } } func (e *Editor) pollTimerEvent() { for { switch <-e.timeChan { case resetMessage: t := time.NewTimer(2 * time.Second) <-t.C e.writeHelpMenu(helpMessage) } } } func newTerminal(fd int) *Terminal { termios := makeRaw(fd) width, height := getWindowSize(fd) terminal := &Terminal{ termios: termios, width: width, height: height - 2, // for status, message bar } return terminal } func makeRows() []*Row { var rows = make([]*Row, 1024) // not good for i := range rows { rows[i] = &Row{ chars: NewGapTable(128), } } return rows } func newEditor(filePath string, debug bool) *Editor { terminal := newTerminal(0) if existsFile(filePath) { e := loadFile(filePath) e.debug = debug e.terminal = terminal return e } rows := makeRows() return &Editor{ crow: 0, ccol: 0, scroolrow: 0, rows: rows, filePath: filePath, keyChan: make(chan rune), timeChan: make(chan messageType), terminal: terminal, n: 1, debug: debug, } } func run(filePath string, debug bool) { e := newEditor(filePath, debug) e.initTerminal() e.refreshAllRows() e.setRowCol(0, 0) go e.readKeys() go 
e.pollTimerEvent() e.interpretKey() } func main() { flag.Parse() if flag.NArg() < 1 || flag.NArg() > 3 { fmt.Println("Usage: mille <filename> [--debug]") return } debug := flag.NArg() == 2 && flag.Arg(1) == "--debug" run(flag.Arg(0), debug) }
{ if col >= r.len() { return } r.chars.DeleteAt(col) }
identifier_body
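Aside: the hidden span of this record is the body of Row.deleteAt, a bounds-checked delete against the GapTable that backs each editor row. GapTable is defined elsewhere in that project, so the sketch below substitutes a plain rune slice to show the same insertAt/deleteAt contract; the type and method names here are stand-ins, not the project's API.

```go
package main

import "fmt"

// sliceRow is a stand-in for the record's GapTable-backed Row:
// same bounds-checked insertAt/deleteAt contract, plain slice storage.
type sliceRow struct {
	chars []rune
}

func (r *sliceRow) len() int { return len(r.chars) }

// insertAt clamps the position to the row length, as the record does.
func (r *sliceRow) insertAt(col int, ch rune) {
	if col > r.len() {
		col = r.len()
	}
	r.chars = append(r.chars, 0)
	copy(r.chars[col+1:], r.chars[col:])
	r.chars[col] = ch
}

// deleteAt ignores out-of-range positions, mirroring the hidden middle.
func (r *sliceRow) deleteAt(col int) {
	if col >= r.len() {
		return
	}
	r.chars = append(r.chars[:col], r.chars[col+1:]...)
}

func main() {
	r := &sliceRow{}
	for i, ch := range "helo" {
		r.insertAt(i, ch)
	}
	r.insertAt(3, 'l') // -> "hello"
	r.deleteAt(10)     // out of range: no-op
	fmt.Println(string(r.chars))
}
```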
decoder.go
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net // Use of this source code is governed by a license that can be found in the LICENSE file. // Package decoder provides several decoders for differently encoded trice streams. package decoder import ( "encoding/binary" "fmt" "io" "regexp" "strings" "sync" "github.com/rokath/trice/internal/id" ) // TestTable ist a struct slice generated by the trice tool -testTable option. type TestTable []struct { In []byte // byte buffer sequence Exp string // output } const ( // LittleEndian is true for little endian trice data. LittleEndian = true // BigEndian is the flag value for target endianness. BigEndian = false // defaultSize is the beginning receive and sync buffer size. DefaultSize = 64 * 1024 // patNextFormatSpecifier is a regex to find next format specifier in a string (exclude %%*) and ignoring %s // // https://regex101.com/r/BjiD5M/1 // Language C plus from language Go: %b, %F, %q // Partial implemented: %hi, %hu, %ld, %li, %lf, %Lf, %Lu, %lli, %lld // Not implemented: %s //patNextFormatSpecifier = `(?:^|[^%])(%[0-9]*(-|c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b))` //patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b|t)` // assumes no `%%` inside string! patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(b|c|d|e|f|g|E|F|G|h|i|l|L|n|o|O|p|q|t|u|x|X)` // assumes no `%%` inside string! // patNextFormatUSpecifier is a regex to find next format u specifier in a string // It does also match %%u positions! //patNextFormatUSpecifier = `(?:%[0-9]*u)` patNextFormatUSpecifier = `%[0-9]*u` // assumes no `%%` inside string! // patNextFormatISpecifier is a regex to find next format i specifier in a string // It does also match %%i positions! patNextFormatISpecifier = `%[0-9]*i` // assumes no `%%` inside string! // patNextFormatXSpecifier is a regex to find next format x specifier in a string // It does also match %%x positions! // patNextFormatXSpecifier = `(?:%[0-9]*(l|o|O|x|X|b))` patNextFormatXSpecifier = `%[0-9]*(l|o|O|x|X|b|p|t)` // assumes no `%%` inside string! // patNextFormatFSpecifier is a regex to find next format f specifier in a string // It does also match %%f positions! patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string! // patNextFormatBoolSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string! // patNextFormatPointerSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string! // hints is the help information in case of errors. Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?" // DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d " // // DefaultStamp16 = "ms_µs" // "tim: %2d_%03d " UnsignedFormatSpecifier = 0 // %u -> %d SignedFormatSpecifier = 1 // FloatFormatSpecifier = 2 // %f and relatives BooleanFormatSpecifier = 3 // a %t (bool) found PointerFormatSpecifier = 4 // a %p (pointer) found ) var ( // Verbose gives more information on output if set. The value is injected from main packages. Verbose bool // ShowID is used as format string for displaying the first trice ID at the start of each line if not "". ShowID string // decoder.LastTriceID is last decoded ID. It is used for switch -showID. 
LastTriceID id.TriceID // TestTableMode is a special option for easy decoder test table generation. TestTableMode bool // Unsigned if true, forces hex and in values printed as unsigned values. Unsigned bool matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier) matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier) matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier) matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier) matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier) matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier) matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier) DebugOut = false // DebugOut enables debug information. DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder. InitialCycle = true // InitialCycle is a helper for the cycle counter automatic. TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value. TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part. TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting. TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps. TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps. TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps. TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed. ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed. ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed. ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed. LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name. TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type. TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete) PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS) IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder) NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting. ) // New abstracts the function type for a new decoder. type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder // Decoder is providing a byte reader returning decoded trice's. // SetInput allows switching the input stream to a different source. type Decoder interface { io.Reader SetInput(io.Reader) } // DecoderData is the common data struct for all decoders. type DecoderData struct { W io.Writer // io.Stdout or the like In io.Reader // in is the inner reader, which is used to get raw bytes InnerBuffer []byte // avoid repeated allocation (trex) IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation. B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices. 
B0 []byte // initial value for B Endian bool // endian is true for LittleEndian and false for BigEndian TriceSize int // trice head and payload size as number of bytes ParamSpace int // trice payload size after head SLen int // string length for TRICE_S Lut id.TriceIDLookUp // id look-up map for translation LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher Li id.TriceIDLookUpLI // location information map Trice id.TriceFmt // id.TriceFmt // received trice } // SetInput allows switching the input stream to a different source. // // This function is for easier testing with cycle counters. func (p *DecoderData) SetInput(r io.Reader) { p.In = r } // ReadU16 returns the 2 b bytes as uint16 according the specified endianness func (p *DecoderData) ReadU16(b []byte) uint16 { if p.Endian { return binary.LittleEndian.Uint16(b) } return binary.BigEndian.Uint16(b) } // ReadU32 returns the 4 b bytes as uint32 according the specified endianness func (p *DecoderData) ReadU32(b []byte) uint32 { if p.Endian { return binary.LittleEndian.Uint32(b) } return binary.BigEndian.Uint32(b) } // ReadU64 returns the 8 b bytes as uint64 according the specified endianness func (p *DecoderData) ReadU64(b []byte) uint64 { if p.Endian { return binary.LittleEndian.Uint64(b) } return binary.BigEndian.Uint64(b) } // UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o. // // If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers. // Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1. // If a float format specifier was found at position k, u[k] is 2, // http://www.cplusplus.com/reference/cstdio/printf/ // https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output func UR
string) (o string, u []int) { o = i i = strings.ReplaceAll(i, "%%", "__") // this makes regex easier and faster var offset int for { s := i[offset:] // remove processed part loc := matchNextFormatSpecifier.FindStringIndex(s) if nil == loc { // no (more) fm found return } offset += loc[1] // track position fm := s[loc[0]:loc[1]] locPointer := matchNextFormatPointerSpecifier.FindStringIndex(fm) if nil != locPointer { // a %p found // This would require `unsafe.Pointer(uintptr(n))` inside unSignedOrSignedOut. // There are false positive windows vet warnings: // https://stackoverflow.com/questions/43767898/casting-a-int-to-a-pointer // https://github.com/golang/go/issues/41205 // As workaround replace %p with %x in the format strings. // Then trice64( "%p", -1 ) could be a problem when using `trice log -unsigned false` // But that we simply ignore right now. o = o[:offset-1] + "x" + o[offset:] // replace %np -> %nx u = append(u, PointerFormatSpecifier) // pointer value continue } locBool := matchNextFormatBoolSpecifier.FindStringIndex(fm) if nil != locBool { // a %t found u = append(u, BooleanFormatSpecifier) // bool value continue } locF := matchNextFormatFSpecifier.FindStringIndex(fm) if nil != locF { // a %nf found u = append(u, FloatFormatSpecifier) // float value continue } locU := matchNextFormatUSpecifier.FindStringIndex(fm) if nil != locU { // a %nu found o = o[:offset-1] + "d" + o[offset:] // replace %nu -> %nd u = append(u, UnsignedFormatSpecifier) // no negative values continue } locI := matchNextFormatISpecifier.FindStringIndex(fm) if nil != locI { // a %ni found o = o[:offset-1] + "d" + o[offset:] // replace %ni -> %nd u = append(u, SignedFormatSpecifier) // also negative values continue } locX := matchNextFormatXSpecifier.FindStringIndex(fm) if nil != locX { // a %nx, %nX or, %no, %nO or %nb found if Unsigned { u = append(u, 0) // no negative values } else { u = append(u, 1) // also negative values } continue } u = append(u, 1) // keep sign in all other cases(also negative values) } } // Dump prints the byte slice as hex in one line func Dump(w io.Writer, b []byte) { for _, x := range b { fmt.Fprintf(w, "%02x ", x) } fmt.Fprintln(w, "") }
eplaceN(i
identifier_name
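Aside: the decoder.go records keep the target endianness in a single boolean (Endian true means little endian) and branch inside ReadU16/ReadU32/ReadU64. As a minimal illustration of that pattern, the sketch below reads the same four bytes both ways with encoding/binary; the reader type is invented for the example and is not the package's DecoderData.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// byteReader is a hypothetical stand-in for the decoder's common data
// struct: one flag selects the byte order used by all Read* helpers.
type byteReader struct {
	little bool // true: little endian, false: big endian
}

func (r byteReader) readU32(b []byte) uint32 {
	if r.little {
		return binary.LittleEndian.Uint32(b)
	}
	return binary.BigEndian.Uint32(b)
}

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}
	le := byteReader{little: true}
	be := byteReader{little: false}
	fmt.Printf("%08x\n", le.readU32(b)) // 04030201
	fmt.Printf("%08x\n", be.readU32(b)) // 01020304
}
```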
decoder.go
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net // Use of this source code is governed by a license that can be found in the LICENSE file. // Package decoder provides several decoders for differently encoded trice streams. package decoder import ( "encoding/binary" "fmt" "io" "regexp" "strings" "sync" "github.com/rokath/trice/internal/id" ) // TestTable ist a struct slice generated by the trice tool -testTable option. type TestTable []struct { In []byte // byte buffer sequence Exp string // output } const ( // LittleEndian is true for little endian trice data. LittleEndian = true // BigEndian is the flag value for target endianness. BigEndian = false // defaultSize is the beginning receive and sync buffer size. DefaultSize = 64 * 1024 // patNextFormatSpecifier is a regex to find next format specifier in a string (exclude %%*) and ignoring %s // // https://regex101.com/r/BjiD5M/1 // Language C plus from language Go: %b, %F, %q // Partial implemented: %hi, %hu, %ld, %li, %lf, %Lf, %Lu, %lli, %lld // Not implemented: %s //patNextFormatSpecifier = `(?:^|[^%])(%[0-9]*(-|c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b))` //patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b|t)` // assumes no `%%` inside string! patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(b|c|d|e|f|g|E|F|G|h|i|l|L|n|o|O|p|q|t|u|x|X)` // assumes no `%%` inside string! // patNextFormatUSpecifier is a regex to find next format u specifier in a string // It does also match %%u positions! //patNextFormatUSpecifier = `(?:%[0-9]*u)` patNextFormatUSpecifier = `%[0-9]*u` // assumes no `%%` inside string! // patNextFormatISpecifier is a regex to find next format i specifier in a string // It does also match %%i positions! patNextFormatISpecifier = `%[0-9]*i` // assumes no `%%` inside string! // patNextFormatXSpecifier is a regex to find next format x specifier in a string // It does also match %%x positions! // patNextFormatXSpecifier = `(?:%[0-9]*(l|o|O|x|X|b))` patNextFormatXSpecifier = `%[0-9]*(l|o|O|x|X|b|p|t)` // assumes no `%%` inside string! // patNextFormatFSpecifier is a regex to find next format f specifier in a string // It does also match %%f positions! patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string! // patNextFormatBoolSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string! // patNextFormatPointerSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string! // hints is the help information in case of errors. Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?" // DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d " // // DefaultStamp16 = "ms_µs" // "tim: %2d_%03d " UnsignedFormatSpecifier = 0 // %u -> %d SignedFormatSpecifier = 1 // FloatFormatSpecifier = 2 // %f and relatives BooleanFormatSpecifier = 3 // a %t (bool) found PointerFormatSpecifier = 4 // a %p (pointer) found ) var ( // Verbose gives more information on output if set. The value is injected from main packages. Verbose bool // ShowID is used as format string for displaying the first trice ID at the start of each line if not "". ShowID string // decoder.LastTriceID is last decoded ID. It is used for switch -showID. 
LastTriceID id.TriceID // TestTableMode is a special option for easy decoder test table generation. TestTableMode bool // Unsigned if true, forces hex and in values printed as unsigned values. Unsigned bool matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier) matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier) matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier) matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier) matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier) matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier) matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier) DebugOut = false // DebugOut enables debug information. DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder. InitialCycle = true // InitialCycle is a helper for the cycle counter automatic. TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value. TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part. TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting. TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps. TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps. TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps. TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed. ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed. ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed. ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed. LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name. TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type. TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete) PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS) IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder) NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting. ) // New abstracts the function type for a new decoder. type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder // Decoder is providing a byte reader returning decoded trice's. // SetInput allows switching the input stream to a different source. type Decoder interface { io.Reader SetInput(io.Reader) } // DecoderData is the common data struct for all decoders. type DecoderData struct { W io.Writer // io.Stdout or the like In io.Reader // in is the inner reader, which is used to get raw bytes InnerBuffer []byte // avoid repeated allocation (trex) IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation. B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices. 
B0 []byte // initial value for B Endian bool // endian is true for LittleEndian and false for BigEndian TriceSize int // trice head and payload size as number of bytes ParamSpace int // trice payload size after head SLen int // string length for TRICE_S Lut id.TriceIDLookUp // id look-up map for translation LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher Li id.TriceIDLookUpLI // location information map Trice id.TriceFmt // id.TriceFmt // received trice } // SetInput allows switching the input stream to a different source. // // This function is for easier testing with cycle counters. func (p *DecoderData) SetInput(r io.Reader) { p.In = r } // ReadU16 returns the 2 b bytes as uint16 according the specified endianness func (p *DecoderData) ReadU16(b []byte) uint16 { if p.Endian { return binary.LittleEndian.Uint16(b) } return binary.BigEndian.Uint16(b) } // ReadU32 returns the 4 b bytes as uint32 according the specified endianness func (p *DecoderData) ReadU32(b []byte) uint32 { if p.Endian { return binary.LittleEndian.Uint32(b) } return binary.BigEndian.Uint32(b) } // ReadU64 returns the 8 b bytes as uint64 according the specified endianness func (p *DecoderData) ReadU64(b []byte) uint64 { if p.Endian { return binary.LittleEndian.Uint64(b) } return binary.BigEndian.Uint64(b) } // UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o. // // If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers. // Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1. // If a float format specifier was found at position k, u[k] is 2, // http://www.cplusplus.com/reference/cstdio/printf/ // https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output func UReplaceN(i string) (o string, u []int) {
// Dump prints the byte slice as hex in one line func Dump(w io.Writer, b []byte) { for _, x := range b { fmt.Fprintf(w, "%02x ", x) } fmt.Fprintln(w, "") }
o = i i = strings.ReplaceAll(i, "%%", "__") // this makes regex easier and faster var offset int for { s := i[offset:] // remove processed part loc := matchNextFormatSpecifier.FindStringIndex(s) if nil == loc { // no (more) fm found return } offset += loc[1] // track position fm := s[loc[0]:loc[1]] locPointer := matchNextFormatPointerSpecifier.FindStringIndex(fm) if nil != locPointer { // a %p found // This would require `unsafe.Pointer(uintptr(n))` inside unSignedOrSignedOut. // There are false positive windows vet warnings: // https://stackoverflow.com/questions/43767898/casting-a-int-to-a-pointer // https://github.com/golang/go/issues/41205 // As workaround replace %p with %x in the format strings. // Then trice64( "%p", -1 ) could be a problem when using `trice log -unsigned false` // But that we simply ignore right now. o = o[:offset-1] + "x" + o[offset:] // replace %np -> %nx u = append(u, PointerFormatSpecifier) // pointer value continue } locBool := matchNextFormatBoolSpecifier.FindStringIndex(fm) if nil != locBool { // a %t found u = append(u, BooleanFormatSpecifier) // bool value continue } locF := matchNextFormatFSpecifier.FindStringIndex(fm) if nil != locF { // a %nf found u = append(u, FloatFormatSpecifier) // float value continue } locU := matchNextFormatUSpecifier.FindStringIndex(fm) if nil != locU { // a %nu found o = o[:offset-1] + "d" + o[offset:] // replace %nu -> %nd u = append(u, UnsignedFormatSpecifier) // no negative values continue } locI := matchNextFormatISpecifier.FindStringIndex(fm) if nil != locI { // a %ni found o = o[:offset-1] + "d" + o[offset:] // replace %ni -> %nd u = append(u, SignedFormatSpecifier) // also negative values continue } locX := matchNextFormatXSpecifier.FindStringIndex(fm) if nil != locX { // a %nx, %nX or, %no, %nO or %nb found if Unsigned { u = append(u, 0) // no negative values } else { u = append(u, 1) // also negative values } continue } u = append(u, 1) // keep sign in all other cases(also negative values) } }
identifier_body
decoder.go
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net // Use of this source code is governed by a license that can be found in the LICENSE file. // Package decoder provides several decoders for differently encoded trice streams. package decoder import ( "encoding/binary" "fmt" "io" "regexp" "strings" "sync" "github.com/rokath/trice/internal/id" ) // TestTable ist a struct slice generated by the trice tool -testTable option. type TestTable []struct { In []byte // byte buffer sequence Exp string // output } const ( // LittleEndian is true for little endian trice data. LittleEndian = true // BigEndian is the flag value for target endianness. BigEndian = false // defaultSize is the beginning receive and sync buffer size. DefaultSize = 64 * 1024 // patNextFormatSpecifier is a regex to find next format specifier in a string (exclude %%*) and ignoring %s // // https://regex101.com/r/BjiD5M/1 // Language C plus from language Go: %b, %F, %q // Partial implemented: %hi, %hu, %ld, %li, %lf, %Lf, %Lu, %lli, %lld // Not implemented: %s //patNextFormatSpecifier = `(?:^|[^%])(%[0-9]*(-|c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b))` //patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b|t)` // assumes no `%%` inside string! patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(b|c|d|e|f|g|E|F|G|h|i|l|L|n|o|O|p|q|t|u|x|X)` // assumes no `%%` inside string! // patNextFormatUSpecifier is a regex to find next format u specifier in a string // It does also match %%u positions! //patNextFormatUSpecifier = `(?:%[0-9]*u)` patNextFormatUSpecifier = `%[0-9]*u` // assumes no `%%` inside string! // patNextFormatISpecifier is a regex to find next format i specifier in a string // It does also match %%i positions! patNextFormatISpecifier = `%[0-9]*i` // assumes no `%%` inside string! // patNextFormatXSpecifier is a regex to find next format x specifier in a string // It does also match %%x positions! // patNextFormatXSpecifier = `(?:%[0-9]*(l|o|O|x|X|b))` patNextFormatXSpecifier = `%[0-9]*(l|o|O|x|X|b|p|t)` // assumes no `%%` inside string! // patNextFormatFSpecifier is a regex to find next format f specifier in a string // It does also match %%f positions! patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string! // patNextFormatBoolSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string! // patNextFormatPointerSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string! // hints is the help information in case of errors. Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?" // DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d " // // DefaultStamp16 = "ms_µs" // "tim: %2d_%03d " UnsignedFormatSpecifier = 0 // %u -> %d SignedFormatSpecifier = 1 // FloatFormatSpecifier = 2 // %f and relatives BooleanFormatSpecifier = 3 // a %t (bool) found PointerFormatSpecifier = 4 // a %p (pointer) found ) var ( // Verbose gives more information on output if set. The value is injected from main packages. Verbose bool // ShowID is used as format string for displaying the first trice ID at the start of each line if not "". ShowID string // decoder.LastTriceID is last decoded ID. It is used for switch -showID. 
LastTriceID id.TriceID // TestTableMode is a special option for easy decoder test table generation. TestTableMode bool // Unsigned if true, forces hex and in values printed as unsigned values. Unsigned bool matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier) matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier) matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier) matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier) matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier) matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier) matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier) DebugOut = false // DebugOut enables debug information. DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder. InitialCycle = true // InitialCycle is a helper for the cycle counter automatic. TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value. TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part. TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting. TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps. TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps. TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps. TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed. ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed. ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed. ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed. LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name. TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type. TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete) PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS) IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder) NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting. ) // New abstracts the function type for a new decoder. type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder // Decoder is providing a byte reader returning decoded trice's. // SetInput allows switching the input stream to a different source. type Decoder interface { io.Reader SetInput(io.Reader) } // DecoderData is the common data struct for all decoders. type DecoderData struct { W io.Writer // io.Stdout or the like In io.Reader // in is the inner reader, which is used to get raw bytes InnerBuffer []byte // avoid repeated allocation (trex) IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation. B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices. 
B0 []byte // initial value for B Endian bool // endian is true for LittleEndian and false for BigEndian TriceSize int // trice head and payload size as number of bytes ParamSpace int // trice payload size after head SLen int // string length for TRICE_S Lut id.TriceIDLookUp // id look-up map for translation LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher Li id.TriceIDLookUpLI // location information map Trice id.TriceFmt // id.TriceFmt // received trice }
// SetInput allows switching the input stream to a different source. // // This function is for easier testing with cycle counters. func (p *DecoderData) SetInput(r io.Reader) { p.In = r } // ReadU16 returns the 2 b bytes as uint16 according the specified endianness func (p *DecoderData) ReadU16(b []byte) uint16 { if p.Endian { return binary.LittleEndian.Uint16(b) } return binary.BigEndian.Uint16(b) } // ReadU32 returns the 4 b bytes as uint32 according the specified endianness func (p *DecoderData) ReadU32(b []byte) uint32 { if p.Endian { return binary.LittleEndian.Uint32(b) } return binary.BigEndian.Uint32(b) } // ReadU64 returns the 8 b bytes as uint64 according the specified endianness func (p *DecoderData) ReadU64(b []byte) uint64 { if p.Endian { return binary.LittleEndian.Uint64(b) } return binary.BigEndian.Uint64(b) } // UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o. // // If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers. // Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1. // If a float format specifier was found at position k, u[k] is 2, // http://www.cplusplus.com/reference/cstdio/printf/ // https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output func UReplaceN(i string) (o string, u []int) { o = i i = strings.ReplaceAll(i, "%%", "__") // this makes regex easier and faster var offset int for { s := i[offset:] // remove processed part loc := matchNextFormatSpecifier.FindStringIndex(s) if nil == loc { // no (more) fm found return } offset += loc[1] // track position fm := s[loc[0]:loc[1]] locPointer := matchNextFormatPointerSpecifier.FindStringIndex(fm) if nil != locPointer { // a %p found // This would require `unsafe.Pointer(uintptr(n))` inside unSignedOrSignedOut. // There are false positive windows vet warnings: // https://stackoverflow.com/questions/43767898/casting-a-int-to-a-pointer // https://github.com/golang/go/issues/41205 // As workaround replace %p with %x in the format strings. // Then trice64( "%p", -1 ) could be a problem when using `trice log -unsigned false` // But that we simply ignore right now. o = o[:offset-1] + "x" + o[offset:] // replace %np -> %nx u = append(u, PointerFormatSpecifier) // pointer value continue } locBool := matchNextFormatBoolSpecifier.FindStringIndex(fm) if nil != locBool { // a %t found u = append(u, BooleanFormatSpecifier) // bool value continue } locF := matchNextFormatFSpecifier.FindStringIndex(fm) if nil != locF { // a %nf found u = append(u, FloatFormatSpecifier) // float value continue } locU := matchNextFormatUSpecifier.FindStringIndex(fm) if nil != locU { // a %nu found o = o[:offset-1] + "d" + o[offset:] // replace %nu -> %nd u = append(u, UnsignedFormatSpecifier) // no negative values continue } locI := matchNextFormatISpecifier.FindStringIndex(fm) if nil != locI { // a %ni found o = o[:offset-1] + "d" + o[offset:] // replace %ni -> %nd u = append(u, SignedFormatSpecifier) // also negative values continue } locX := matchNextFormatXSpecifier.FindStringIndex(fm) if nil != locX { // a %nx, %nX or, %no, %nO or %nb found if Unsigned { u = append(u, 0) // no negative values } else { u = append(u, 1) // also negative values } continue } u = append(u, 1) // keep sign in all other cases(also negative values) } } // Dump prints the byte slice as hex in one line func Dump(w io.Writer, b []byte) { for _, x := range b { fmt.Fprintf(w, "%02x ", x) } fmt.Fprintln(w, "") }
random_line_split
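Aside: UReplaceN in these records rewrites C-style specifiers into Go-friendly ones (%u and %i become %d) while recording, per specifier, how the corresponding argument should be printed. A reduced, self-contained version of just the %u rewrite is sketched below using the same regexp approach; it deliberately skips the %%, pointer, float, and hex cases the full function handles.

```go
package main

import (
	"fmt"
	"regexp"
)

// matchU finds %u with an optional width, e.g. %u or %4u.
var matchU = regexp.MustCompile(`%[0-9]*u`)

// replaceU rewrites every %nu into %nd, which is the core of what the
// record's UReplaceN does for unsigned specifiers (the full function also
// classifies each specifier so the caller knows to print it unsigned).
func replaceU(format string) string {
	return matchU.ReplaceAllStringFunc(format, func(m string) string {
		return m[:len(m)-1] + "d"
	})
}

func main() {
	in := "value=%4u count=%u name=%s"
	fmt.Println(replaceU(in)) // value=%4d count=%d name=%s
}
```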
decoder.go
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net // Use of this source code is governed by a license that can be found in the LICENSE file. // Package decoder provides several decoders for differently encoded trice streams. package decoder import ( "encoding/binary" "fmt" "io" "regexp" "strings" "sync" "github.com/rokath/trice/internal/id" ) // TestTable ist a struct slice generated by the trice tool -testTable option. type TestTable []struct { In []byte // byte buffer sequence Exp string // output } const ( // LittleEndian is true for little endian trice data. LittleEndian = true // BigEndian is the flag value for target endianness. BigEndian = false // defaultSize is the beginning receive and sync buffer size. DefaultSize = 64 * 1024 // patNextFormatSpecifier is a regex to find next format specifier in a string (exclude %%*) and ignoring %s // // https://regex101.com/r/BjiD5M/1 // Language C plus from language Go: %b, %F, %q // Partial implemented: %hi, %hu, %ld, %li, %lf, %Lf, %Lu, %lli, %lld // Not implemented: %s //patNextFormatSpecifier = `(?:^|[^%])(%[0-9]*(-|c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b))` //patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(c|d|e|E|f|F|g|G|h|i|l|L|o|O|p|q|u|x|X|n|b|t)` // assumes no `%%` inside string! patNextFormatSpecifier = `%([+\-#'0-9\.0-9])*(b|c|d|e|f|g|E|F|G|h|i|l|L|n|o|O|p|q|t|u|x|X)` // assumes no `%%` inside string! // patNextFormatUSpecifier is a regex to find next format u specifier in a string // It does also match %%u positions! //patNextFormatUSpecifier = `(?:%[0-9]*u)` patNextFormatUSpecifier = `%[0-9]*u` // assumes no `%%` inside string! // patNextFormatISpecifier is a regex to find next format i specifier in a string // It does also match %%i positions! patNextFormatISpecifier = `%[0-9]*i` // assumes no `%%` inside string! // patNextFormatXSpecifier is a regex to find next format x specifier in a string // It does also match %%x positions! // patNextFormatXSpecifier = `(?:%[0-9]*(l|o|O|x|X|b))` patNextFormatXSpecifier = `%[0-9]*(l|o|O|x|X|b|p|t)` // assumes no `%%` inside string! // patNextFormatFSpecifier is a regex to find next format f specifier in a string // It does also match %%f positions! patNextFormatFSpecifier = `%[(+\-0-9\.0-9#]*(e|E|f|F|g|G)` // assumes no `%%` inside string! // patNextFormatBoolSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatBoolSpecifier = `%t` // assumes no `%%` inside string! // patNextFormatPointerSpecifier is a regex to find next format f specifier in a string // It does also match %%t positions! patNextFormatPointerSpecifier = `%p` // assumes no `%%` inside string! // hints is the help information in case of errors. Hints = "att:Hints:Baudrate? Encoding? Interrupt? Overflow? Parameter count? Password? til.json? Version?" // DefaultStamp32 = "ssss,ms_µs" // "tim:%4d,%03d_%03d " // // DefaultStamp16 = "ms_µs" // "tim: %2d_%03d " UnsignedFormatSpecifier = 0 // %u -> %d SignedFormatSpecifier = 1 // FloatFormatSpecifier = 2 // %f and relatives BooleanFormatSpecifier = 3 // a %t (bool) found PointerFormatSpecifier = 4 // a %p (pointer) found ) var ( // Verbose gives more information on output if set. The value is injected from main packages. Verbose bool // ShowID is used as format string for displaying the first trice ID at the start of each line if not "". ShowID string // decoder.LastTriceID is last decoded ID. It is used for switch -showID. 
LastTriceID id.TriceID // TestTableMode is a special option for easy decoder test table generation. TestTableMode bool // Unsigned if true, forces hex and in values printed as unsigned values. Unsigned bool matchNextFormatSpecifier = regexp.MustCompile(patNextFormatSpecifier) matchNextFormatUSpecifier = regexp.MustCompile(patNextFormatUSpecifier) matchNextFormatISpecifier = regexp.MustCompile(patNextFormatISpecifier) matchNextFormatXSpecifier = regexp.MustCompile(patNextFormatXSpecifier) matchNextFormatFSpecifier = regexp.MustCompile(patNextFormatFSpecifier) matchNextFormatBoolSpecifier = regexp.MustCompile(patNextFormatBoolSpecifier) matchNextFormatPointerSpecifier = regexp.MustCompile(patNextFormatPointerSpecifier) DebugOut = false // DebugOut enables debug information. DumpLineByteCount int // DumpLineByteCount is the bytes per line for the dumpDec decoder. InitialCycle = true // InitialCycle is a helper for the cycle counter automatic. TargetTimestamp uint64 // targetTimestamp contains target specific timestamp value. TargetLocation uint32 // targetLocation contains 16 bit file id in high and 16 bit line number in low part. TargetStamp string // TargetTimeStampUnit is the target timestamps time base for default formatting. TargetStamp32 string // ShowTargetStamp32 is the format string for target timestamps. TargetStamp16 string // ShowTargetStamp16 is the format string for target timestamps. TargetStamp0 string // ShowTargetStamp0 is the format string for target timestamps. TargetTimeStampUnitPassed bool // TargetTimeStampUnitPassed is true when flag was TargetTimeStampUnit passed. ShowTargetStamp32Passed bool // ShowTargetStamp32Passed is true when flag was TargetTimeStamp32 passed. ShowTargetStamp16Passed bool // ShowTargetStamp16Passed is true when flag was TargetTimeStamp16 passed. ShowTargetStamp0Passed bool // ShowTargetStamp0Passed is true when flag was TargetTimeStamp0 passed. LocationInformationFormatString string // LocationInformationFormatString is the format string for target location: line number and file name. TargetTimestampSize int // TargetTimestampSize is set in dependence of trice type. TargetLocationExists bool // TargetLocationExists is set in dependence of p.COBSModeDescriptor. (obsolete) PackageFraming string // Framing is used for packing. Valid values COBS, TCOBS, TCOBSv1 (same as TCOBS) IDBits = 14 // IDBits holds count of bits used for ID (used at least in trexDecoder) NewlineIndent = -1 // Used for trice messages containing several newlines in format string for formatting. ) // New abstracts the function type for a new decoder. type New func(out io.Writer, lut id.TriceIDLookUp, m *sync.RWMutex, li id.TriceIDLookUpLI, in io.Reader, endian bool) Decoder // Decoder is providing a byte reader returning decoded trice's. // SetInput allows switching the input stream to a different source. type Decoder interface { io.Reader SetInput(io.Reader) } // DecoderData is the common data struct for all decoders. type DecoderData struct { W io.Writer // io.Stdout or the like In io.Reader // in is the inner reader, which is used to get raw bytes InnerBuffer []byte // avoid repeated allocation (trex) IBuf []byte // iBuf holds unprocessed (raw) bytes for interpretation. B []byte // read buffer holds a single decoded TCOBS package, which can contain several trices. 
B0 []byte // initial value for B Endian bool // endian is true for LittleEndian and false for BigEndian TriceSize int // trice head and payload size as number of bytes ParamSpace int // trice payload size after head SLen int // string length for TRICE_S Lut id.TriceIDLookUp // id look-up map for translation LutMutex *sync.RWMutex // to avoid concurrent map read and map write during map refresh triggered by filewatcher Li id.TriceIDLookUpLI // location information map Trice id.TriceFmt // id.TriceFmt // received trice } // SetInput allows switching the input stream to a different source. // // This function is for easier testing with cycle counters. func (p *DecoderData) SetInput(r io.Reader) { p.In = r } // ReadU16 returns the 2 b bytes as uint16 according the specified endianness func (p *DecoderData) ReadU16(b []byte) uint16 { if p.Endian { return binary.LittleEndian.Uint16(b) } return binary.BigEndian.Uint16(b) } // ReadU32 returns the 4 b bytes as uint32 according the specified endianness func (p *DecoderData) ReadU32(b []byte) uint32 { if p.Endian { return binary.LittleEndian.Uint32(b) } return binary.BigEndian.Uint32(b) } // ReadU64 returns the 8 b bytes as uint64 according the specified endianness func (p *DecoderData) ReadU64(b []byte) uint64 { if p.Endian { return binary.LittleEndian.Uint64(b) } return binary.BigEndian.Uint64(b) } // UReplaceN checks all format specifier in i and replaces %nu with %nd and returns that result as o. // // If a replacement took place on position k u[k] is 1. Afterwards len(u) is amount of found format specifiers. // Additional, if UnsignedHex is true, for FormatX specifiers u[k] is also 1. // If a float format specifier was found at position k, u[k] is 2, // http://www.cplusplus.com/reference/cstdio/printf/ // https://www.codingunit.com/printf-format-specifiers-format-conversions-and-formatted-output func UReplaceN(i string) (o string, u []int) { o = i i = strings.ReplaceAll(i, "%%", "__") // this makes regex easier and faster var offset int for { s := i[offset:] // remove processed part loc := matchNextFormatSpecifier.FindStringIndex(s) if nil == loc { // no (more) fm found return } offset += loc[1] // track position fm := s[loc[0]:loc[1]] locPointer := matchNextFormatPointerSpecifier.FindStringIndex(fm) if nil != locPointer { // a %p found // This would require `unsafe.Pointer(uintptr(n))` inside unSignedOrSignedOut. // There are false positive windows vet warnings: // https://stackoverflow.com/questions/43767898/casting-a-int-to-a-pointer // https://github.com/golang/go/issues/41205 // As workaround replace %p with %x in the format strings. // Then trice64( "%p", -1 ) could be a problem when using `trice log -unsigned false` // But that we simply ignore right now. 
o = o[:offset-1] + "x" + o[offset:] // replace %np -> %nx u = append(u, PointerFormatSpecifier) // pointer value continue } locBool := matchNextFormatBoolSpecifier.FindStringIndex(fm) if nil != locBool { // a %t found u = append(u, BooleanFormatSpecifier) // bool value continue } locF := matchNextFormatFSpecifier.FindStringIndex(fm) if nil != locF { // a %nf found u = append(u, FloatFormatSpecifier) // float value continue } locU := matchNextFormatUSpecifier.FindStringIndex(fm) if nil != locU { // a %nu found o = o[:offset-1] + "d" + o[offset:] // replace %nu -> %nd u = append(u, UnsignedFormatSpecifier) // no negative values continue } locI := matchNextFormatISpecifier.FindStringIndex(fm) if nil != locI { // a %ni found o = o[:offset-1] + "d" + o[offset:] // replace %ni -> %nd u = append(u, SignedFormatSpecifier) // also negative values continue } locX := matchNextFormatXSpecifier.FindStringIndex(fm) if nil != locX {
u = append(u, 1) // keep sign in all other cases(also negative values) } } // Dump prints the byte slice as hex in one line func Dump(w io.Writer, b []byte) { for _, x := range b { fmt.Fprintf(w, "%02x ", x) } fmt.Fprintln(w, "") }
// a %nx, %nX or, %no, %nO or %nb found if Unsigned { u = append(u, 0) // no negative values } else { u = append(u, 1) // also negative values } continue }
conditional_block
pool.rs
// This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. 
use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; /// An atomic counter used to allocate thread IDs. static COUNTER: AtomicUsize = AtomicUsize::new(1); thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic is // to be a sanity check. It is not expected that the thread ID space // will actually be exhausted in practice. // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// The type of the function used to create values in a pool when the pool is /// empty and the caller requests one. type CreateFn<T> = Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>; /// A simple thread safe pool for reusing values. /// /// Getting a value out comes with a guard. When that guard is dropped, the /// value is automatically put back in the pool. /// /// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means /// that T can use interior mutability. This is possible because a pool is /// guaranteed to provide a value to exactly one thread at any time. /// /// Currently, a pool never contracts in size. Its size is proportional to the /// number of simultaneous uses. pub struct Pool<T> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: CreateFn<T>, /// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it /// gets 'owner_val' directly instead of returning a T from 'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that created /// the Pool. owner_val: T, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T> // would be Sync. However, since we use a Pool to store mutable scratch space, // we wind up using a T that has interior mutability and is thus itself not // Sync. So what we *really* want is for our Pool<T> to by Sync even when T is // not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is used // to implement faster access to a pool value in the common case of a pool // being accessed in the same thread in which it was created. The 'stack' field // is also shared, but a Mutex<T> where T: Send is already Sync. So we only // need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from one // thread. 
In our implementation below, we guarantee this by only returning the // 'owner_val' when the ID of the current thread matches the ID of the thread // that created the Pool. Since this can only ever be one thread, it follows // that only one thread can access 'owner_val' at any point in time. Thus, it // is safe to declare that Pool<T> is Sync when T is Send. // // NOTE: It would also be possible to make the owning thread be the *first* // thread that tries to get a value out of a Pool. However, the current // implementation is a little simpler and it's not clear if making the first // thread (rather than the creating thread) is meaningfully better. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation below // tries to balance safety with performance. The case where a Regex is used // from multiple threads simultaneously will suffer a bit since getting a cache // will require unlocking a mutex. unsafe impl<T: Send> Sync for Pool<T> {} impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { f.debug_struct("Pool") .field("stack", &self.stack) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value back /// in the pool once it's dropped. #[derive(Debug)] pub struct PoolGuard<'a, T: Send> { /// The pool that this guard is attached to. pool: &'a Pool<T>, /// This is None when the guard represents the special "owned" value. In /// which case, the value is retrieved from 'pool.owner_val'. value: Option<Box<T>>, } impl<T: Send> Pool<T> { /// Create a new pool. The given closure is used to create values in the /// pool when necessary. pub fn new(create: CreateFn<T>) -> Pool<T> { let owner = AtomicUsize::new(0); let owner_val = create(); Pool { stack: Mutex::new(vec![]), create, owner, owner_val } } /// Get a value from the pool. The caller is guaranteed to have exclusive /// access to the given value. /// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is NOT /// guaranteed to return the same value received in the first get call. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn get(&self) -> PoolGuard<'_, T> { // Our fast path checks if the caller is the thread that "owns" this // pool. Or stated differently, whether it is the first thread that // tried to extract a value from the pool. If it is, then we can return // a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access to this // value. Since a thread is uniquely identified by the THREAD_ID thread // local, it follows that is the caller's thread ID is equal to the // owner, then only one thread may receive this value. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Relaxed); if caller == owner { return self.guard_owned(); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the stack /// is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. 
#[cold] fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { use std::sync::atomic::Ordering::Relaxed; if owner == 0 { // The sentinel 0 value means this pool is not yet owned. We // try to atomically set the owner. If we do, then this thread // becomes the owner and we can return a guard that represents // the special T for the owner. let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); if res.is_ok() { return self.guard_owned(); } } let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; self.guard_stack(value) } /// Puts a value back into the pool. Callers don't need to call this. Once /// the guard that's returned by 'get' is dropped, it is put back into the /// pool automatically. fn put(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } /// Create a guard that represents the special owned T. fn guard_owned(&self) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: None } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T>
} impl<'a, T: Send> PoolGuard<'a, T> { /// Return the underlying value. pub fn value(&self) -> &T { match self.value { None => &self.pool.owner_val, Some(ref v) => &**v, } } } impl<'a, T: Send> Drop for PoolGuard<'a, T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put(value); } } } #[cfg(test)] mod tests { use std::panic::{RefUnwindSafe, UnwindSafe}; use super::*; #[test] fn oibits() { use crate::exec::ProgramCache; fn has_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} has_oibits::<Pool<ProgramCache>>(); } // Tests that Pool implements the "single owner" optimization. That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. #[test] fn thread_owner_optimization() { use std::cell::RefCell; use std::sync::Arc; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); pool.get().value().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); let v = guard.value(); v.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); let v = guard.value(); v.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); } }
{ PoolGuard { pool: self, value: Some(value) } }
identifier_body
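The module comments in the record above explain the owner-thread optimization: the first thread to call 'get' receives 'owner_val' directly, while every other thread pops a boxed value off the mutex-protected stack. A minimal usage sketch under those assumptions follows; the ScratchBuf type and the capacity are illustrative only, and it presumes the Pool and CreateFn items from pool.rs are in scope.

use std::cell::RefCell;
use std::sync::Arc;

// Hypothetical scratch type with interior mutability; Pool<T> only needs T: Send.
type ScratchBuf = RefCell<Vec<u8>>;

fn pool_usage_sketch() {
    // The creation closure is boxed into the CreateFn<T> alias from pool.rs.
    let pool: Arc<Pool<ScratchBuf>> =
        Arc::new(Pool::new(Box::new(|| RefCell::new(Vec::with_capacity(1024)))));

    // The first 'get' on this thread claims ownership; later calls from the
    // same thread keep taking the mutex-free fast path.
    {
        let guard = pool.get();
        guard.value().borrow_mut().extend_from_slice(b"fast path");
    } // guard dropped here; the owned value stays attached to the pool

    // Any other thread takes the slow path: lock the mutex, pop a Box<T>
    // (or create a fresh value), and push it back when its guard drops.
    let p = Arc::clone(&pool);
    std::thread::spawn(move || {
        let guard = p.get();
        guard.value().borrow_mut().clear();
    })
    .join()
    .unwrap();
}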
pool.rs
// This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. 
use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; /// An atomic counter used to allocate thread IDs. static COUNTER: AtomicUsize = AtomicUsize::new(1); thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic is // to be a sanity check. It is not expected that the thread ID space // will actually be exhausted in practice. // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// The type of the function used to create values in a pool when the pool is /// empty and the caller requests one. type CreateFn<T> = Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>; /// A simple thread safe pool for reusing values. /// /// Getting a value out comes with a guard. When that guard is dropped, the /// value is automatically put back in the pool. /// /// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means /// that T can use interior mutability. This is possible because a pool is /// guaranteed to provide a value to exactly one thread at any time. /// /// Currently, a pool never contracts in size. Its size is proportional to the /// number of simultaneous uses. pub struct
<T> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: CreateFn<T>, /// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it /// gets 'owner_val' directly instead of returning a T from 'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that created /// the Pool. owner_val: T, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T> // would be Sync. However, since we use a Pool to store mutable scratch space, // we wind up using a T that has interior mutability and is thus itself not // Sync. So what we *really* want is for our Pool<T> to by Sync even when T is // not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is used // to implement faster access to a pool value in the common case of a pool // being accessed in the same thread in which it was created. The 'stack' field // is also shared, but a Mutex<T> where T: Send is already Sync. So we only // need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from one // thread. In our implementation below, we guarantee this by only returning the // 'owner_val' when the ID of the current thread matches the ID of the thread // that created the Pool. Since this can only ever be one thread, it follows // that only one thread can access 'owner_val' at any point in time. Thus, it // is safe to declare that Pool<T> is Sync when T is Send. // // NOTE: It would also be possible to make the owning thread be the *first* // thread that tries to get a value out of a Pool. However, the current // implementation is a little simpler and it's not clear if making the first // thread (rather than the creating thread) is meaningfully better. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation below // tries to balance safety with performance. The case where a Regex is used // from multiple threads simultaneously will suffer a bit since getting a cache // will require unlocking a mutex. unsafe impl<T: Send> Sync for Pool<T> {} impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { f.debug_struct("Pool") .field("stack", &self.stack) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value back /// in the pool once it's dropped. #[derive(Debug)] pub struct PoolGuard<'a, T: Send> { /// The pool that this guard is attached to. pool: &'a Pool<T>, /// This is None when the guard represents the special "owned" value. In /// which case, the value is retrieved from 'pool.owner_val'. value: Option<Box<T>>, } impl<T: Send> Pool<T> { /// Create a new pool. 
The given closure is used to create values in the /// pool when necessary. pub fn new(create: CreateFn<T>) -> Pool<T> { let owner = AtomicUsize::new(0); let owner_val = create(); Pool { stack: Mutex::new(vec![]), create, owner, owner_val } } /// Get a value from the pool. The caller is guaranteed to have exclusive /// access to the given value. /// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is NOT /// guaranteed to return the same value received in the first get call. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn get(&self) -> PoolGuard<'_, T> { // Our fast path checks if the caller is the thread that "owns" this // pool. Or stated differently, whether it is the first thread that // tried to extract a value from the pool. If it is, then we can return // a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access to this // value. Since a thread is uniquely identified by the THREAD_ID thread // local, it follows that is the caller's thread ID is equal to the // owner, then only one thread may receive this value. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Relaxed); if caller == owner { return self.guard_owned(); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the stack /// is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. #[cold] fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { use std::sync::atomic::Ordering::Relaxed; if owner == 0 { // The sentinel 0 value means this pool is not yet owned. We // try to atomically set the owner. If we do, then this thread // becomes the owner and we can return a guard that represents // the special T for the owner. let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); if res.is_ok() { return self.guard_owned(); } } let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; self.guard_stack(value) } /// Puts a value back into the pool. Callers don't need to call this. Once /// the guard that's returned by 'get' is dropped, it is put back into the /// pool automatically. fn put(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } /// Create a guard that represents the special owned T. fn guard_owned(&self) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: None } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: Some(value) } } } impl<'a, T: Send> PoolGuard<'a, T> { /// Return the underlying value. pub fn value(&self) -> &T { match self.value { None => &self.pool.owner_val, Some(ref v) => &**v, } } } impl<'a, T: Send> Drop for PoolGuard<'a, T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put(value); } } } #[cfg(test)] mod tests { use std::panic::{RefUnwindSafe, UnwindSafe}; use super::*; #[test] fn oibits() { use crate::exec::ProgramCache; fn has_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} has_oibits::<Pool<ProgramCache>>(); } // Tests that Pool implements the "single owner" optimization. 
That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. #[test] fn thread_owner_optimization() { use std::cell::RefCell; use std::sync::Arc; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); pool.get().value().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); let v = guard.value(); v.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); let v = guard.value(); v.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); } }
Pool
identifier_name
pool.rs
// This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. 
use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; /// An atomic counter used to allocate thread IDs. static COUNTER: AtomicUsize = AtomicUsize::new(1); thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic is // to be a sanity check. It is not expected that the thread ID space // will actually be exhausted in practice. // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// The type of the function used to create values in a pool when the pool is /// empty and the caller requests one. type CreateFn<T> = Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>; /// A simple thread safe pool for reusing values. /// /// Getting a value out comes with a guard. When that guard is dropped, the /// value is automatically put back in the pool. /// /// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means /// that T can use interior mutability. This is possible because a pool is /// guaranteed to provide a value to exactly one thread at any time. /// /// Currently, a pool never contracts in size. Its size is proportional to the /// number of simultaneous uses. pub struct Pool<T> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: CreateFn<T>,
/// gets 'owner_val' directly instead of returning a T from 'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that created /// the Pool. owner_val: T, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T> // would be Sync. However, since we use a Pool to store mutable scratch space, // we wind up using a T that has interior mutability and is thus itself not // Sync. So what we *really* want is for our Pool<T> to by Sync even when T is // not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is used // to implement faster access to a pool value in the common case of a pool // being accessed in the same thread in which it was created. The 'stack' field // is also shared, but a Mutex<T> where T: Send is already Sync. So we only // need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from one // thread. In our implementation below, we guarantee this by only returning the // 'owner_val' when the ID of the current thread matches the ID of the thread // that created the Pool. Since this can only ever be one thread, it follows // that only one thread can access 'owner_val' at any point in time. Thus, it // is safe to declare that Pool<T> is Sync when T is Send. // // NOTE: It would also be possible to make the owning thread be the *first* // thread that tries to get a value out of a Pool. However, the current // implementation is a little simpler and it's not clear if making the first // thread (rather than the creating thread) is meaningfully better. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation below // tries to balance safety with performance. The case where a Regex is used // from multiple threads simultaneously will suffer a bit since getting a cache // will require unlocking a mutex. unsafe impl<T: Send> Sync for Pool<T> {} impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { f.debug_struct("Pool") .field("stack", &self.stack) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value back /// in the pool once it's dropped. #[derive(Debug)] pub struct PoolGuard<'a, T: Send> { /// The pool that this guard is attached to. pool: &'a Pool<T>, /// This is None when the guard represents the special "owned" value. In /// which case, the value is retrieved from 'pool.owner_val'. value: Option<Box<T>>, } impl<T: Send> Pool<T> { /// Create a new pool. The given closure is used to create values in the /// pool when necessary. pub fn new(create: CreateFn<T>) -> Pool<T> { let owner = AtomicUsize::new(0); let owner_val = create(); Pool { stack: Mutex::new(vec![]), create, owner, owner_val } } /// Get a value from the pool. The caller is guaranteed to have exclusive /// access to the given value. 
/// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is NOT /// guaranteed to return the same value received in the first get call. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn get(&self) -> PoolGuard<'_, T> { // Our fast path checks if the caller is the thread that "owns" this // pool. Or stated differently, whether it is the first thread that // tried to extract a value from the pool. If it is, then we can return // a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access to this // value. Since a thread is uniquely identified by the THREAD_ID thread // local, it follows that is the caller's thread ID is equal to the // owner, then only one thread may receive this value. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Relaxed); if caller == owner { return self.guard_owned(); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the stack /// is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. #[cold] fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { use std::sync::atomic::Ordering::Relaxed; if owner == 0 { // The sentinel 0 value means this pool is not yet owned. We // try to atomically set the owner. If we do, then this thread // becomes the owner and we can return a guard that represents // the special T for the owner. let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); if res.is_ok() { return self.guard_owned(); } } let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; self.guard_stack(value) } /// Puts a value back into the pool. Callers don't need to call this. Once /// the guard that's returned by 'get' is dropped, it is put back into the /// pool automatically. fn put(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } /// Create a guard that represents the special owned T. fn guard_owned(&self) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: None } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: Some(value) } } } impl<'a, T: Send> PoolGuard<'a, T> { /// Return the underlying value. pub fn value(&self) -> &T { match self.value { None => &self.pool.owner_val, Some(ref v) => &**v, } } } impl<'a, T: Send> Drop for PoolGuard<'a, T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put(value); } } } #[cfg(test)] mod tests { use std::panic::{RefUnwindSafe, UnwindSafe}; use super::*; #[test] fn oibits() { use crate::exec::ProgramCache; fn has_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} has_oibits::<Pool<ProgramCache>>(); } // Tests that Pool implements the "single owner" optimization. That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. 
#[test] fn thread_owner_optimization() { use std::cell::RefCell; use std::sync::Arc; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); pool.get().value().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); let v = guard.value(); v.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); let v = guard.value(); v.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); } }
/// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it
random_line_split
common.py
import random import re import string import torch from torch import nn import csv import os import argparse import json import shutil import logging import numpy as np import matplotlib.pyplot as plt from torch.utils.data import DataLoader, Dataset from torch.nn.utils.rnn import pad_sequence from rouge import Rouge import razdel from collections import Counter from pprint import pprint from tqdm import tqdm from itertools import chain as iter_chain from contextlib import contextmanager global_rouge = Rouge() @contextmanager def temp_np_seed(seed): state = np.random.get_state() try: np.random.seed(seed) yield finally: np.random.set_state(state) def arg2bool(v): if isinstance(v, int): if v == 0: return False elif v == 1: return True elif isinstance(v, str): if v.lower() in ('yes', 'true', '1'): return True elif v.lower() in ('no', 'false', '0'): return False raise argparse.ArgumentTypeError('Boolean value expected.') def calc_rouge(hyp, ref): assert isinstance(hyp, str) and isinstance(ref, str) assert len(ref.strip()) != 0 if len(hyp.strip()) != 0: return global_rouge.get_scores(hyps=hyp, refs=ref, avg=True) else: return calc_rouge('x', 'y') # zeros def calc_mean_rouge(rouges): res = calc_rouge('x', 'y') # zeros for item in rouges: for k1, d in res.items(): for k2 in d: res[k1][k2] += item[k1][k2] for k1, d in res.items(): for k2 in d: res[k1][k2] /= len(rouges) return res def str_rouge(rg): return f"R1 {rg['rouge-1']['f']:.02f}, R2 {rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}" DEVICE = None def set_device(device): global DEVICE DEVICE = device if torch.cuda.is_available(): print(torch.cuda.get_device_properties(0)) print('Using', DEVICE) def get_device(): global DEVICE return DEVICE def set_seed(seed): # note: there are another nuances for gpu and multi-gpu random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def clear_or_create_directory(dir_name): """ ignoring all possible errors """ shutil.rmtree(dir_name, ignore_errors=True) cntr = 0 while True: try: os.makedirs(dir_name, exist_ok=True) return except OSError: if cntr < 10: # some windows bug? 
cntr += 1 from time import sleep sleep(0.1 * cntr) else: raise class SummarizationDataset(Dataset): def __init__(self, texts, titles): self.texts = texts self.titles = titles def __getitem__(self, item): return self.texts[item], self.titles[item] def __len__(self): return len(self.texts) class SimpleVocabulary: def __init__(self, all_words, max_vocab_size, pretrained_words=None): helper_symbols = ["<PAD>", "<UNK>", "<EOS>"] self.PAD_IDX = 0 self.UNK_IDX = 1 self.EOS_IDX = 2 counts = Counter(all_words) print(f'Number of unique input words: {len(counts)}') words = [w for w, c in counts.most_common(max_vocab_size)] num_words_added = len(helper_symbols) if pretrained_words is not None: pretrained_words = set(pretrained_words).difference(set(words)) num_words_added += len(pretrained_words) assert max_vocab_size >= num_words_added words = words[:-num_words_added] print( f'SimpleVocabulary:\n' f'{len(words)} words from input data,\n' f'{len(helper_symbols)} helper words,\n' f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,' ) words = helper_symbols + words + (pretrained_words if pretrained_words is not None else []) print(f'{len(words)} words total') self.itos = words self.stoi = {s: i for i, s in enumerate(self.itos)} def encode(self, text): return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX] def __iter__(self): return iter(self.itos) def __len__(self): return len(self.itos) def encode_text(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [tokenizer.encode( txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts] texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch def encode_text_end(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [] for txt in texts: enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0) enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]]) enc_texts.append(enc) texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch class CollateFnStart: def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) class CollateFnEnd: """ takes end of text """ def __init__(self, tokenizer, max_len_src, max_len_tgt):
def __call__(self, batch): return ( encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) def decode_text(tokenizer, vocab_ids): return tokenizer.decode( vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True) def nltk_stem_sentence_rus(sentence): from nltk.tokenize import word_tokenize from nltk.stem.snowball import RussianStemmer from nltk.corpus import stopwords stemmer = RussianStemmer() russian_stopwords = set(stopwords.words("russian")) tokens = word_tokenize(sentence, language='russian') tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords] stems = [stemmer.stem(t) for t in tokens] return ' '.join(stems) def lemmatize_sentence_rus(sentence): from nltk.corpus import stopwords from pymystem3 import Mystem my_stem = Mystem() russian_stopwords = set(stopwords.words("russian")) lemmas = my_stem.lemmatize(sentence) lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords] return ' '.join(lemmas) def lemmatize_sentences_rus(sentences): """much faster than call lemmatize_sentence_rus in cycle""" split = 'fks2hwras1ma39hka766gbk' chunk_size = 10000 def handle_chunk(sentences_chunk): all_sents = (' ' + split + ' ').join(sentences_chunk) all_lemmas = lemmatize_sentence_rus(all_sents).split() chunk_res = [[]] for lemma in all_lemmas: if lemma == split: chunk_res.append([]) else: chunk_res[-1].append(lemma) return chunk_res res = [] i = 0 while i < len(sentences): if len(sentences) > chunk_size: print(f'Lemmatization: Done for {i} from {len(sentences)} sentences') i_step = min(chunk_size, len(sentences) - i) res.extend(handle_chunk(sentences[i:i + i_step])) i += i_step assert len(res) == len(sentences) res = [' '.join(arr) for arr in res] return res def lemmatize_texts_rus(texts): """split each text to sentences and lemmatize them""" sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts] texts_lengths = [len(t) for t in sentenized] sentences = [s for t in sentenized for s in t] sentences_lemm = lemmatize_sentences_rus(sentences) texts_lemm = [] pos = 0 for text_length in texts_lengths: texts_lemm.append(sentences_lemm[pos:pos + text_length]) pos += text_length assert pos == len(sentences) assert len(sentenized) == len(texts_lemm) assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm)) return texts_lemm, sentenized def lemmatize_text_rus(text): """split text to sentences and lemmatize them""" text_lemm, text_sent = lemmatize_texts_rus([text]) text_lemm, text_sent = text_lemm[0], text_sent[0] return text_lemm, text_sent def get_num_lines_in_file(file_path, *args, **kwargs): with open(file_path, *args, **kwargs) as f: return sum(1 for _ in f) class ConsoleColors: Map = { 'PINK': '\033[95m', 'BLUE': '\033[34m', 'YELLOW': '\033[93m', 'RED': '\033[31m', 'GREEN': '\033[92m', 'BOLD': '\033[1m', 'UNDERLINE': '\033[4m', 'ITALIC': '\033[3m', 'ENDCOLOR': '\033[0m', '': '\033[0m', } @staticmethod def wrap(string, color): return ConsoleColors.Map[color] + string + ConsoleColors.Map['ENDCOLOR'] def print_confusion_matrix(predicted, target, n_classes=None): def fmt(val): return f'{(val * 100).round(2):>5.1f}' def print_sep(sep): print(' ' + sep * str_len) if n_classes is None: n_classes = int(max(max(predicted), max(target)) + 1) confusion_matrix = np.zeros((n_classes, n_classes), np.int64) assert len(predicted) == len(target) for p, t in zip(predicted, target): 
confusion_matrix[t, p] += 1 confusion_matrix = confusion_matrix / confusion_matrix.sum() str_len = 8 * (n_classes + 1) + 11 row_str = ( ' | ' + ConsoleColors.wrap(fmt(np.diag(confusion_matrix).sum()), 'YELLOW') + ' || ' + ' | '.join( ConsoleColors.wrap(f'p ={i:>2}', 'BLUE') for i in range(n_classes) ) + ' || ' + ConsoleColors.wrap('all p', 'BLUE') + ' |' ) print() print_sep('-') print(row_str) for i, row in enumerate(confusion_matrix): row_str = ( ' | ' + ConsoleColors.wrap(f't ={i:>2}', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'YELLOW' if i == j else '') for j, val in enumerate(row) ) + ' || ' + ConsoleColors.wrap(fmt(sum(row)), 'PINK') + ' |' ) print_sep('-' if i != 0 else '=') print(row_str) row_str = ( ' | ' + ConsoleColors.wrap('all t', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'PINK') for val in confusion_matrix.sum(0) ) + ' || ' + ConsoleColors.wrap(fmt(confusion_matrix.sum()), 'YELLOW') + ' |' ) print_sep('=') print(row_str) print_sep('-') print() return confusion_matrix def sentenize_with_newlines(text): sents = text.split('\n') sents = [s.text for sent in sents for s in razdel.sentenize(sent) if len(s.text) != 0] return sents def plot_sents_hist(model_hist, target_hist, max_sents=64): plt.bar(np.arange(max_sents), target_hist[:max_sents], color='blue', width=0.2) plt.bar(np.arange(max_sents) + 0.5, model_hist[:max_sents], color='orange', width=0.2) plt.show() def chop_string(user_string, chunk_size=80, join=True): output = [] words = user_string.split(" ") total_length = 0 while total_length < len(user_string) and len(words) > 0: line = [] next_word = words[0] line_len = len(next_word) + 1 while len(line) == 0 or ((line_len < chunk_size) and len(words) > 0): words.pop(0) line.append(next_word) if len(words) > 0: next_word = words[0] line_len += len(next_word) + 1 line = " ".join(line) output.append(line) total_length += len(line) if join: return '\n'.join(output) return output def download_file_from_google_drive(id_from_the_link, destination): """https://stackoverflow.com/a/39225272""" import requests URL = "https://docs.google.com/uc?export=download" def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) session = requests.Session() response = session.get(URL, params={'id': id_from_the_link}, stream=True) token = get_confirm_token(response) if token: params = {'id': id_from_the_link, 'confirm': token} response = session.get(URL, params=params, stream=True) save_response_content(response, destination)
self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt
identifier_body
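The two collate classes in the record above differ only in how over-long source texts are cut: CollateFnStart keeps the beginning, while CollateFnEnd keeps the end and re-inserts a [CLS] token. A minimal sketch of wiring one of them into a DataLoader, assuming a HuggingFace-style tokenizer and parallel texts/titles lists are already available (the max lengths and batch size are illustrative):

from torch.utils.data import DataLoader

# texts and titles are assumed to be parallel lists of strings.
dataset = SummarizationDataset(texts, titles)

collate_fn = CollateFnEnd(tokenizer, max_len_src=512, max_len_tgt=48)
loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)

for src_batch, tgt_batch in loader:
    # Both tensors are padded with tokenizer.pad_token_id to the longest
    # sequence in the batch: shapes (batch, src_len) and (batch, tgt_len).
    print(src_batch.shape, tgt_batch.shape)
    break  # one batch is enough for the sketch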
common.py
import random import re import string import torch from torch import nn import csv import os import argparse import json import shutil import logging import numpy as np import matplotlib.pyplot as plt from torch.utils.data import DataLoader, Dataset from torch.nn.utils.rnn import pad_sequence from rouge import Rouge import razdel from collections import Counter from pprint import pprint from tqdm import tqdm from itertools import chain as iter_chain from contextlib import contextmanager global_rouge = Rouge() @contextmanager def temp_np_seed(seed): state = np.random.get_state() try: np.random.seed(seed) yield finally: np.random.set_state(state) def arg2bool(v): if isinstance(v, int): if v == 0: return False elif v == 1: return True elif isinstance(v, str): if v.lower() in ('yes', 'true', '1'): return True elif v.lower() in ('no', 'false', '0'): return False raise argparse.ArgumentTypeError('Boolean value expected.') def calc_rouge(hyp, ref): assert isinstance(hyp, str) and isinstance(ref, str) assert len(ref.strip()) != 0 if len(hyp.strip()) != 0: return global_rouge.get_scores(hyps=hyp, refs=ref, avg=True) else: return calc_rouge('x', 'y') # zeros def calc_mean_rouge(rouges): res = calc_rouge('x', 'y') # zeros for item in rouges: for k1, d in res.items(): for k2 in d: res[k1][k2] += item[k1][k2] for k1, d in res.items(): for k2 in d: res[k1][k2] /= len(rouges) return res def str_rouge(rg): return f"R1 {rg['rouge-1']['f']:.02f}, R2 {rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}" DEVICE = None def set_device(device): global DEVICE DEVICE = device if torch.cuda.is_available(): print(torch.cuda.get_device_properties(0)) print('Using', DEVICE) def get_device(): global DEVICE return DEVICE def set_seed(seed): # note: there are another nuances for gpu and multi-gpu random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def clear_or_create_directory(dir_name): """ ignoring all possible errors """ shutil.rmtree(dir_name, ignore_errors=True) cntr = 0 while True: try: os.makedirs(dir_name, exist_ok=True) return except OSError: if cntr < 10: # some windows bug? cntr += 1 from time import sleep sleep(0.1 * cntr) else: raise class SummarizationDataset(Dataset): def __init__(self, texts, titles): self.texts = texts self.titles = titles def __getitem__(self, item): return self.texts[item], self.titles[item] def __len__(self): return len(self.texts) class SimpleVocabulary: def __init__(self, all_words, max_vocab_size, pretrained_words=None): helper_symbols = ["<PAD>", "<UNK>", "<EOS>"] self.PAD_IDX = 0 self.UNK_IDX = 1 self.EOS_IDX = 2 counts = Counter(all_words) print(f'Number of unique input words: {len(counts)}') words = [w for w, c in counts.most_common(max_vocab_size)] num_words_added = len(helper_symbols) if pretrained_words is not None: pretrained_words = set(pretrained_words).difference(set(words)) num_words_added += len(pretrained_words) assert max_vocab_size >= num_words_added words = words[:-num_words_added] print( f'SimpleVocabulary:\n' f'{len(words)} words from input data,\n' f'{len(helper_symbols)} helper words,\n' f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,' ) words = helper_symbols + words + (pretrained_words if pretrained_words is not None else []) print(f'{len(words)} words total') self.itos = words self.stoi = {s: i for i, s in enumerate(self.itos)} def encode(self, text): return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX] def __iter__(self): return iter(self.itos) def __len__(self): return len(self.itos) def
(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [tokenizer.encode( txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts] texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch def encode_text_end(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [] for txt in texts: enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0) enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]]) enc_texts.append(enc) texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch class CollateFnStart: def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) class CollateFnEnd: """ takes end of text """ def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) def decode_text(tokenizer, vocab_ids): return tokenizer.decode( vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True) def nltk_stem_sentence_rus(sentence): from nltk.tokenize import word_tokenize from nltk.stem.snowball import RussianStemmer from nltk.corpus import stopwords stemmer = RussianStemmer() russian_stopwords = set(stopwords.words("russian")) tokens = word_tokenize(sentence, language='russian') tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords] stems = [stemmer.stem(t) for t in tokens] return ' '.join(stems) def lemmatize_sentence_rus(sentence): from nltk.corpus import stopwords from pymystem3 import Mystem my_stem = Mystem() russian_stopwords = set(stopwords.words("russian")) lemmas = my_stem.lemmatize(sentence) lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords] return ' '.join(lemmas) def lemmatize_sentences_rus(sentences): """much faster than call lemmatize_sentence_rus in cycle""" split = 'fks2hwras1ma39hka766gbk' chunk_size = 10000 def handle_chunk(sentences_chunk): all_sents = (' ' + split + ' ').join(sentences_chunk) all_lemmas = lemmatize_sentence_rus(all_sents).split() chunk_res = [[]] for lemma in all_lemmas: if lemma == split: chunk_res.append([]) else: chunk_res[-1].append(lemma) return chunk_res res = [] i = 0 while i < len(sentences): if len(sentences) > chunk_size: print(f'Lemmatization: Done for {i} from {len(sentences)} sentences') i_step = min(chunk_size, len(sentences) - i) res.extend(handle_chunk(sentences[i:i + i_step])) i += i_step assert len(res) == len(sentences) res = [' '.join(arr) for arr in res] return res def lemmatize_texts_rus(texts): """split each text to sentences and lemmatize them""" sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts] texts_lengths = [len(t) for t in sentenized] sentences = [s for 
t in sentenized for s in t] sentences_lemm = lemmatize_sentences_rus(sentences) texts_lemm = [] pos = 0 for text_length in texts_lengths: texts_lemm.append(sentences_lemm[pos:pos + text_length]) pos += text_length assert pos == len(sentences) assert len(sentenized) == len(texts_lemm) assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm)) return texts_lemm, sentenized def lemmatize_text_rus(text): """split text to sentences and lemmatize them""" text_lemm, text_sent = lemmatize_texts_rus([text]) text_lemm, text_sent = text_lemm[0], text_sent[0] return text_lemm, text_sent def get_num_lines_in_file(file_path, *args, **kwargs): with open(file_path, *args, **kwargs) as f: return sum(1 for _ in f) class ConsoleColors: Map = { 'PINK': '\033[95m', 'BLUE': '\033[34m', 'YELLOW': '\033[93m', 'RED': '\033[31m', 'GREEN': '\033[92m', 'BOLD': '\033[1m', 'UNDERLINE': '\033[4m', 'ITALIC': '\033[3m', 'ENDCOLOR': '\033[0m', '': '\033[0m', } @staticmethod def wrap(string, color): return ConsoleColors.Map[color] + string + ConsoleColors.Map['ENDCOLOR'] def print_confusion_matrix(predicted, target, n_classes=None): def fmt(val): return f'{(val * 100).round(2):>5.1f}' def print_sep(sep): print(' ' + sep * str_len) if n_classes is None: n_classes = int(max(max(predicted), max(target)) + 1) confusion_matrix = np.zeros((n_classes, n_classes), np.int64) assert len(predicted) == len(target) for p, t in zip(predicted, target): confusion_matrix[t, p] += 1 confusion_matrix = confusion_matrix / confusion_matrix.sum() str_len = 8 * (n_classes + 1) + 11 row_str = ( ' | ' + ConsoleColors.wrap(fmt(np.diag(confusion_matrix).sum()), 'YELLOW') + ' || ' + ' | '.join( ConsoleColors.wrap(f'p ={i:>2}', 'BLUE') for i in range(n_classes) ) + ' || ' + ConsoleColors.wrap('all p', 'BLUE') + ' |' ) print() print_sep('-') print(row_str) for i, row in enumerate(confusion_matrix): row_str = ( ' | ' + ConsoleColors.wrap(f't ={i:>2}', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'YELLOW' if i == j else '') for j, val in enumerate(row) ) + ' || ' + ConsoleColors.wrap(fmt(sum(row)), 'PINK') + ' |' ) print_sep('-' if i != 0 else '=') print(row_str) row_str = ( ' | ' + ConsoleColors.wrap('all t', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'PINK') for val in confusion_matrix.sum(0) ) + ' || ' + ConsoleColors.wrap(fmt(confusion_matrix.sum()), 'YELLOW') + ' |' ) print_sep('=') print(row_str) print_sep('-') print() return confusion_matrix def sentenize_with_newlines(text): sents = text.split('\n') sents = [s.text for sent in sents for s in razdel.sentenize(sent) if len(s.text) != 0] return sents def plot_sents_hist(model_hist, target_hist, max_sents=64): plt.bar(np.arange(max_sents), target_hist[:max_sents], color='blue', width=0.2) plt.bar(np.arange(max_sents) + 0.5, model_hist[:max_sents], color='orange', width=0.2) plt.show() def chop_string(user_string, chunk_size=80, join=True): output = [] words = user_string.split(" ") total_length = 0 while total_length < len(user_string) and len(words) > 0: line = [] next_word = words[0] line_len = len(next_word) + 1 while len(line) == 0 or ((line_len < chunk_size) and len(words) > 0): words.pop(0) line.append(next_word) if len(words) > 0: next_word = words[0] line_len += len(next_word) + 1 line = " ".join(line) output.append(line) total_length += len(line) if join: return '\n'.join(output) return output def download_file_from_google_drive(id_from_the_link, destination): """https://stackoverflow.com/a/39225272""" import requests URL = 
"https://docs.google.com/uc?export=download" def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) session = requests.Session() response = session.get(URL, params={'id': id_from_the_link}, stream=True) token = get_confirm_token(response) if token: params = {'id': id_from_the_link, 'confirm': token} response = session.get(URL, params=params, stream=True) save_response_content(response, destination)
encode_text
identifier_name
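A minimal usage sketch, not part of the original dump, showing how the `encode_text` / `CollateFnStart` pair from common.py is typically wired into a `DataLoader`. It assumes `SummarizationDataset` and `CollateFnStart` from the file above are importable and that a Hugging Face tokenizer is used; the model name is only an example.

# Usage sketch (assumption: SummarizationDataset and CollateFnStart from common.py are in scope).
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

# Example model name; any BERT-style tokenizer with a pad token would do.
tokenizer = AutoTokenizer.from_pretrained("DeepPavlov/rubert-base-cased")

texts = ["first document text ...", "second document text ..."]
titles = ["first title", "second title"]

dataset = SummarizationDataset(texts, titles)
loader = DataLoader(
    dataset,
    batch_size=2,
    collate_fn=CollateFnStart(tokenizer, max_len_src=512, max_len_tgt=48),
)

src_batch, tgt_batch = next(iter(loader))  # both are padded LongTensor batches
print(src_batch.shape, tgt_batch.shape)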
common.py
import random import re import string import torch from torch import nn import csv import os import argparse import json import shutil import logging import numpy as np import matplotlib.pyplot as plt from torch.utils.data import DataLoader, Dataset from torch.nn.utils.rnn import pad_sequence from rouge import Rouge import razdel from collections import Counter from pprint import pprint from tqdm import tqdm from itertools import chain as iter_chain from contextlib import contextmanager global_rouge = Rouge() @contextmanager def temp_np_seed(seed): state = np.random.get_state() try: np.random.seed(seed) yield finally: np.random.set_state(state) def arg2bool(v): if isinstance(v, int): if v == 0: return False elif v == 1: return True elif isinstance(v, str): if v.lower() in ('yes', 'true', '1'): return True elif v.lower() in ('no', 'false', '0'): return False raise argparse.ArgumentTypeError('Boolean value expected.') def calc_rouge(hyp, ref): assert isinstance(hyp, str) and isinstance(ref, str) assert len(ref.strip()) != 0 if len(hyp.strip()) != 0: return global_rouge.get_scores(hyps=hyp, refs=ref, avg=True) else: return calc_rouge('x', 'y') # zeros def calc_mean_rouge(rouges): res = calc_rouge('x', 'y') # zeros for item in rouges: for k1, d in res.items():
for k1, d in res.items(): for k2 in d: res[k1][k2] /= len(rouges) return res def str_rouge(rg): return f"R1 {rg['rouge-1']['f']:.02f}, R2 {rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}" DEVICE = None def set_device(device): global DEVICE DEVICE = device if torch.cuda.is_available(): print(torch.cuda.get_device_properties(0)) print('Using', DEVICE) def get_device(): global DEVICE return DEVICE def set_seed(seed): # note: there are another nuances for gpu and multi-gpu random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def clear_or_create_directory(dir_name): """ ignoring all possible errors """ shutil.rmtree(dir_name, ignore_errors=True) cntr = 0 while True: try: os.makedirs(dir_name, exist_ok=True) return except OSError: if cntr < 10: # some windows bug? cntr += 1 from time import sleep sleep(0.1 * cntr) else: raise class SummarizationDataset(Dataset): def __init__(self, texts, titles): self.texts = texts self.titles = titles def __getitem__(self, item): return self.texts[item], self.titles[item] def __len__(self): return len(self.texts) class SimpleVocabulary: def __init__(self, all_words, max_vocab_size, pretrained_words=None): helper_symbols = ["<PAD>", "<UNK>", "<EOS>"] self.PAD_IDX = 0 self.UNK_IDX = 1 self.EOS_IDX = 2 counts = Counter(all_words) print(f'Number of unique input words: {len(counts)}') words = [w for w, c in counts.most_common(max_vocab_size)] num_words_added = len(helper_symbols) if pretrained_words is not None: pretrained_words = set(pretrained_words).difference(set(words)) num_words_added += len(pretrained_words) assert max_vocab_size >= num_words_added words = words[:-num_words_added] print( f'SimpleVocabulary:\n' f'{len(words)} words from input data,\n' f'{len(helper_symbols)} helper words,\n' f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,' ) words = helper_symbols + words + (pretrained_words if pretrained_words is not None else []) print(f'{len(words)} words total') self.itos = words self.stoi = {s: i for i, s in enumerate(self.itos)} def encode(self, text): return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX] def __iter__(self): return iter(self.itos) def __len__(self): return len(self.itos) def encode_text(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [tokenizer.encode( txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts] texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch def encode_text_end(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [] for txt in texts: enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0) enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]]) enc_texts.append(enc) texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch class CollateFnStart: def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) class CollateFnEnd: """ takes end of text """ 
def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) def decode_text(tokenizer, vocab_ids): return tokenizer.decode( vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True) def nltk_stem_sentence_rus(sentence): from nltk.tokenize import word_tokenize from nltk.stem.snowball import RussianStemmer from nltk.corpus import stopwords stemmer = RussianStemmer() russian_stopwords = set(stopwords.words("russian")) tokens = word_tokenize(sentence, language='russian') tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords] stems = [stemmer.stem(t) for t in tokens] return ' '.join(stems) def lemmatize_sentence_rus(sentence): from nltk.corpus import stopwords from pymystem3 import Mystem my_stem = Mystem() russian_stopwords = set(stopwords.words("russian")) lemmas = my_stem.lemmatize(sentence) lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords] return ' '.join(lemmas) def lemmatize_sentences_rus(sentences): """much faster than call lemmatize_sentence_rus in cycle""" split = 'fks2hwras1ma39hka766gbk' chunk_size = 10000 def handle_chunk(sentences_chunk): all_sents = (' ' + split + ' ').join(sentences_chunk) all_lemmas = lemmatize_sentence_rus(all_sents).split() chunk_res = [[]] for lemma in all_lemmas: if lemma == split: chunk_res.append([]) else: chunk_res[-1].append(lemma) return chunk_res res = [] i = 0 while i < len(sentences): if len(sentences) > chunk_size: print(f'Lemmatization: Done for {i} from {len(sentences)} sentences') i_step = min(chunk_size, len(sentences) - i) res.extend(handle_chunk(sentences[i:i + i_step])) i += i_step assert len(res) == len(sentences) res = [' '.join(arr) for arr in res] return res def lemmatize_texts_rus(texts): """split each text to sentences and lemmatize them""" sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts] texts_lengths = [len(t) for t in sentenized] sentences = [s for t in sentenized for s in t] sentences_lemm = lemmatize_sentences_rus(sentences) texts_lemm = [] pos = 0 for text_length in texts_lengths: texts_lemm.append(sentences_lemm[pos:pos + text_length]) pos += text_length assert pos == len(sentences) assert len(sentenized) == len(texts_lemm) assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm)) return texts_lemm, sentenized def lemmatize_text_rus(text): """split text to sentences and lemmatize them""" text_lemm, text_sent = lemmatize_texts_rus([text]) text_lemm, text_sent = text_lemm[0], text_sent[0] return text_lemm, text_sent def get_num_lines_in_file(file_path, *args, **kwargs): with open(file_path, *args, **kwargs) as f: return sum(1 for _ in f) class ConsoleColors: Map = { 'PINK': '\033[95m', 'BLUE': '\033[34m', 'YELLOW': '\033[93m', 'RED': '\033[31m', 'GREEN': '\033[92m', 'BOLD': '\033[1m', 'UNDERLINE': '\033[4m', 'ITALIC': '\033[3m', 'ENDCOLOR': '\033[0m', '': '\033[0m', } @staticmethod def wrap(string, color): return ConsoleColors.Map[color] + string + ConsoleColors.Map['ENDCOLOR'] def print_confusion_matrix(predicted, target, n_classes=None): def fmt(val): return f'{(val * 100).round(2):>5.1f}' def print_sep(sep): print(' ' + sep * str_len) if n_classes is None: n_classes = int(max(max(predicted), max(target)) + 1) 
confusion_matrix = np.zeros((n_classes, n_classes), np.int64) assert len(predicted) == len(target) for p, t in zip(predicted, target): confusion_matrix[t, p] += 1 confusion_matrix = confusion_matrix / confusion_matrix.sum() str_len = 8 * (n_classes + 1) + 11 row_str = ( ' | ' + ConsoleColors.wrap(fmt(np.diag(confusion_matrix).sum()), 'YELLOW') + ' || ' + ' | '.join( ConsoleColors.wrap(f'p ={i:>2}', 'BLUE') for i in range(n_classes) ) + ' || ' + ConsoleColors.wrap('all p', 'BLUE') + ' |' ) print() print_sep('-') print(row_str) for i, row in enumerate(confusion_matrix): row_str = ( ' | ' + ConsoleColors.wrap(f't ={i:>2}', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'YELLOW' if i == j else '') for j, val in enumerate(row) ) + ' || ' + ConsoleColors.wrap(fmt(sum(row)), 'PINK') + ' |' ) print_sep('-' if i != 0 else '=') print(row_str) row_str = ( ' | ' + ConsoleColors.wrap('all t', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'PINK') for val in confusion_matrix.sum(0) ) + ' || ' + ConsoleColors.wrap(fmt(confusion_matrix.sum()), 'YELLOW') + ' |' ) print_sep('=') print(row_str) print_sep('-') print() return confusion_matrix def sentenize_with_newlines(text): sents = text.split('\n') sents = [s.text for sent in sents for s in razdel.sentenize(sent) if len(s.text) != 0] return sents def plot_sents_hist(model_hist, target_hist, max_sents=64): plt.bar(np.arange(max_sents), target_hist[:max_sents], color='blue', width=0.2) plt.bar(np.arange(max_sents) + 0.5, model_hist[:max_sents], color='orange', width=0.2) plt.show() def chop_string(user_string, chunk_size=80, join=True): output = [] words = user_string.split(" ") total_length = 0 while total_length < len(user_string) and len(words) > 0: line = [] next_word = words[0] line_len = len(next_word) + 1 while len(line) == 0 or ((line_len < chunk_size) and len(words) > 0): words.pop(0) line.append(next_word) if len(words) > 0: next_word = words[0] line_len += len(next_word) + 1 line = " ".join(line) output.append(line) total_length += len(line) if join: return '\n'.join(output) return output def download_file_from_google_drive(id_from_the_link, destination): """https://stackoverflow.com/a/39225272""" import requests URL = "https://docs.google.com/uc?export=download" def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) session = requests.Session() response = session.get(URL, params={'id': id_from_the_link}, stream=True) token = get_confirm_token(response) if token: params = {'id': id_from_the_link, 'confirm': token} response = session.get(URL, params=params, stream=True) save_response_content(response, destination)
for k2 in d: res[k1][k2] += item[k1][k2]
conditional_block
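A short illustrative sketch, assuming `calc_rouge`, `calc_mean_rouge`, and `str_rouge` from common.py are in scope: per-pair ROUGE scores are computed first, then averaged. The hypothesis/reference strings are made up for illustration.

# Illustrative only: average ROUGE over a few (hypothesis, reference) pairs.
hyps = ["the cat sat on the mat", "a dog runs in the park"]
refs = ["the cat is on the mat", "the dog is running in a park"]

pair_scores = [calc_rouge(h, r) for h, r in zip(hyps, refs)]
mean_scores = calc_mean_rouge(pair_scores)
print(str_rouge(mean_scores))  # prints something like "R1 0.xx, R2 0.xx, RL 0.xx"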
common.py
import random import re import string import torch from torch import nn import csv import os import argparse import json import shutil import logging import numpy as np import matplotlib.pyplot as plt from torch.utils.data import DataLoader, Dataset from torch.nn.utils.rnn import pad_sequence from rouge import Rouge import razdel from collections import Counter from pprint import pprint from tqdm import tqdm from itertools import chain as iter_chain from contextlib import contextmanager global_rouge = Rouge() @contextmanager def temp_np_seed(seed): state = np.random.get_state() try: np.random.seed(seed) yield finally: np.random.set_state(state) def arg2bool(v): if isinstance(v, int): if v == 0: return False elif v == 1: return True elif isinstance(v, str): if v.lower() in ('yes', 'true', '1'): return True elif v.lower() in ('no', 'false', '0'): return False raise argparse.ArgumentTypeError('Boolean value expected.') def calc_rouge(hyp, ref): assert isinstance(hyp, str) and isinstance(ref, str) assert len(ref.strip()) != 0 if len(hyp.strip()) != 0: return global_rouge.get_scores(hyps=hyp, refs=ref, avg=True) else: return calc_rouge('x', 'y') # zeros def calc_mean_rouge(rouges): res = calc_rouge('x', 'y') # zeros for item in rouges: for k1, d in res.items(): for k2 in d: res[k1][k2] += item[k1][k2] for k1, d in res.items(): for k2 in d: res[k1][k2] /= len(rouges) return res
def str_rouge(rg): return f"R1 {rg['rouge-1']['f']:.02f}, R2 {rg['rouge-2']['f']:.02f}, RL {rg['rouge-l']['f']:.02f}" DEVICE = None def set_device(device): global DEVICE DEVICE = device if torch.cuda.is_available(): print(torch.cuda.get_device_properties(0)) print('Using', DEVICE) def get_device(): global DEVICE return DEVICE def set_seed(seed): # note: there are another nuances for gpu and multi-gpu random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def clear_or_create_directory(dir_name): """ ignoring all possible errors """ shutil.rmtree(dir_name, ignore_errors=True) cntr = 0 while True: try: os.makedirs(dir_name, exist_ok=True) return except OSError: if cntr < 10: # some windows bug? cntr += 1 from time import sleep sleep(0.1 * cntr) else: raise class SummarizationDataset(Dataset): def __init__(self, texts, titles): self.texts = texts self.titles = titles def __getitem__(self, item): return self.texts[item], self.titles[item] def __len__(self): return len(self.texts) class SimpleVocabulary: def __init__(self, all_words, max_vocab_size, pretrained_words=None): helper_symbols = ["<PAD>", "<UNK>", "<EOS>"] self.PAD_IDX = 0 self.UNK_IDX = 1 self.EOS_IDX = 2 counts = Counter(all_words) print(f'Number of unique input words: {len(counts)}') words = [w for w, c in counts.most_common(max_vocab_size)] num_words_added = len(helper_symbols) if pretrained_words is not None: pretrained_words = set(pretrained_words).difference(set(words)) num_words_added += len(pretrained_words) assert max_vocab_size >= num_words_added words = words[:-num_words_added] print( f'SimpleVocabulary:\n' f'{len(words)} words from input data,\n' f'{len(helper_symbols)} helper words,\n' f'{len(pretrained_words) if pretrained_words is not None else 0} pretrained words,' ) words = helper_symbols + words + (pretrained_words if pretrained_words is not None else []) print(f'{len(words)} words total') self.itos = words self.stoi = {s: i for i, s in enumerate(self.itos)} def encode(self, text): return [self.stoi.get(tok, self.UNK_IDX) for tok in text] + [self.EOS_IDX] def __iter__(self): return iter(self.itos) def __len__(self): return len(self.itos) def encode_text(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [tokenizer.encode( txt, return_tensors='pt', max_length=max_len, truncation=max_len is not None).squeeze(0) for txt in texts] texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch def encode_text_end(tokenizer, texts, max_len=None): if isinstance(texts, str): texts = [texts] assert isinstance(texts, list) if max_len is None: max_len = 999999999 enc_texts = [] for txt in texts: enc = tokenizer.encode(txt, return_tensors='pt').squeeze(0) enc = torch.cat([torch.tensor([tokenizer.convert_tokens_to_ids('[CLS]')]).long(), enc[-max_len + 1:]]) enc_texts.append(enc) texts_batch = pad_sequence(enc_texts, batch_first=True, padding_value=tokenizer.pad_token_id) return texts_batch class CollateFnStart: def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) class CollateFnEnd: """ takes end of text """ def __init__(self, tokenizer, max_len_src, max_len_tgt): self.tokenizer = 
tokenizer self.max_len_src = max_len_src self.max_len_tgt = max_len_tgt def __call__(self, batch): return ( encode_text_end(self.tokenizer, [txt for txt, title in batch], self.max_len_src), encode_text(self.tokenizer, [title for txt, title in batch], self.max_len_tgt) ) def decode_text(tokenizer, vocab_ids): return tokenizer.decode( vocab_ids.squeeze(), skip_special_tokens=True, clean_up_tokenization_spaces=True) def nltk_stem_sentence_rus(sentence): from nltk.tokenize import word_tokenize from nltk.stem.snowball import RussianStemmer from nltk.corpus import stopwords stemmer = RussianStemmer() russian_stopwords = set(stopwords.words("russian")) tokens = word_tokenize(sentence, language='russian') tokens = [t for t in tokens if re.search(r'\w', t) is not None and t not in russian_stopwords] stems = [stemmer.stem(t) for t in tokens] return ' '.join(stems) def lemmatize_sentence_rus(sentence): from nltk.corpus import stopwords from pymystem3 import Mystem my_stem = Mystem() russian_stopwords = set(stopwords.words("russian")) lemmas = my_stem.lemmatize(sentence) lemmas = [t for t in lemmas if re.search(r'\w', t) is not None and t not in russian_stopwords] return ' '.join(lemmas) def lemmatize_sentences_rus(sentences): """much faster than call lemmatize_sentence_rus in cycle""" split = 'fks2hwras1ma39hka766gbk' chunk_size = 10000 def handle_chunk(sentences_chunk): all_sents = (' ' + split + ' ').join(sentences_chunk) all_lemmas = lemmatize_sentence_rus(all_sents).split() chunk_res = [[]] for lemma in all_lemmas: if lemma == split: chunk_res.append([]) else: chunk_res[-1].append(lemma) return chunk_res res = [] i = 0 while i < len(sentences): if len(sentences) > chunk_size: print(f'Lemmatization: Done for {i} from {len(sentences)} sentences') i_step = min(chunk_size, len(sentences) - i) res.extend(handle_chunk(sentences[i:i + i_step])) i += i_step assert len(res) == len(sentences) res = [' '.join(arr) for arr in res] return res def lemmatize_texts_rus(texts): """split each text to sentences and lemmatize them""" sentenized = [[s.text for s in razdel.sentenize(t)] for t in texts] texts_lengths = [len(t) for t in sentenized] sentences = [s for t in sentenized for s in t] sentences_lemm = lemmatize_sentences_rus(sentences) texts_lemm = [] pos = 0 for text_length in texts_lengths: texts_lemm.append(sentences_lemm[pos:pos + text_length]) pos += text_length assert pos == len(sentences) assert len(sentenized) == len(texts_lemm) assert all(len(s) == len(a) for s, a in zip(sentenized, texts_lemm)) return texts_lemm, sentenized def lemmatize_text_rus(text): """split text to sentences and lemmatize them""" text_lemm, text_sent = lemmatize_texts_rus([text]) text_lemm, text_sent = text_lemm[0], text_sent[0] return text_lemm, text_sent def get_num_lines_in_file(file_path, *args, **kwargs): with open(file_path, *args, **kwargs) as f: return sum(1 for _ in f) class ConsoleColors: Map = { 'PINK': '\033[95m', 'BLUE': '\033[34m', 'YELLOW': '\033[93m', 'RED': '\033[31m', 'GREEN': '\033[92m', 'BOLD': '\033[1m', 'UNDERLINE': '\033[4m', 'ITALIC': '\033[3m', 'ENDCOLOR': '\033[0m', '': '\033[0m', } @staticmethod def wrap(string, color): return ConsoleColors.Map[color] + string + ConsoleColors.Map['ENDCOLOR'] def print_confusion_matrix(predicted, target, n_classes=None): def fmt(val): return f'{(val * 100).round(2):>5.1f}' def print_sep(sep): print(' ' + sep * str_len) if n_classes is None: n_classes = int(max(max(predicted), max(target)) + 1) confusion_matrix = np.zeros((n_classes, n_classes), np.int64) assert 
len(predicted) == len(target) for p, t in zip(predicted, target): confusion_matrix[t, p] += 1 confusion_matrix = confusion_matrix / confusion_matrix.sum() str_len = 8 * (n_classes + 1) + 11 row_str = ( ' | ' + ConsoleColors.wrap(fmt(np.diag(confusion_matrix).sum()), 'YELLOW') + ' || ' + ' | '.join( ConsoleColors.wrap(f'p ={i:>2}', 'BLUE') for i in range(n_classes) ) + ' || ' + ConsoleColors.wrap('all p', 'BLUE') + ' |' ) print() print_sep('-') print(row_str) for i, row in enumerate(confusion_matrix): row_str = ( ' | ' + ConsoleColors.wrap(f't ={i:>2}', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'YELLOW' if i == j else '') for j, val in enumerate(row) ) + ' || ' + ConsoleColors.wrap(fmt(sum(row)), 'PINK') + ' |' ) print_sep('-' if i != 0 else '=') print(row_str) row_str = ( ' | ' + ConsoleColors.wrap('all t', 'BLUE') + ' || ' + ' | '.join( ConsoleColors.wrap(fmt(val), 'PINK') for val in confusion_matrix.sum(0) ) + ' || ' + ConsoleColors.wrap(fmt(confusion_matrix.sum()), 'YELLOW') + ' |' ) print_sep('=') print(row_str) print_sep('-') print() return confusion_matrix def sentenize_with_newlines(text): sents = text.split('\n') sents = [s.text for sent in sents for s in razdel.sentenize(sent) if len(s.text) != 0] return sents def plot_sents_hist(model_hist, target_hist, max_sents=64): plt.bar(np.arange(max_sents), target_hist[:max_sents], color='blue', width=0.2) plt.bar(np.arange(max_sents) + 0.5, model_hist[:max_sents], color='orange', width=0.2) plt.show() def chop_string(user_string, chunk_size=80, join=True): output = [] words = user_string.split(" ") total_length = 0 while total_length < len(user_string) and len(words) > 0: line = [] next_word = words[0] line_len = len(next_word) + 1 while len(line) == 0 or ((line_len < chunk_size) and len(words) > 0): words.pop(0) line.append(next_word) if len(words) > 0: next_word = words[0] line_len += len(next_word) + 1 line = " ".join(line) output.append(line) total_length += len(line) if join: return '\n'.join(output) return output def download_file_from_google_drive(id_from_the_link, destination): """https://stackoverflow.com/a/39225272""" import requests URL = "https://docs.google.com/uc?export=download" def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) session = requests.Session() response = session.get(URL, params={'id': id_from_the_link}, stream=True) token = get_confirm_token(response) if token: params = {'id': id_from_the_link, 'confirm': token} response = session.get(URL, params=params, stream=True) save_response_content(response, destination)
random_line_split
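A tiny sketch of calling `print_confusion_matrix` from common.py; the labels below are toy values chosen only to show the call signature. The function prints a colorized table of fractions and returns the normalized matrix.

# Toy integer class labels, illustrative only.
predicted = [0, 1, 1, 2, 2, 2]
target    = [0, 1, 2, 2, 2, 0]
cm = print_confusion_matrix(predicted, target)
print(cm.sum())  # ~1.0, since the matrix is normalized by the total sample count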
manager.py
""" Python script to manage running jobs, checking the results, etc. """ import argparse import os import pprint import subprocess import sys from argparse import RawTextHelpFormatter def run(cmd): print(cmd) try: res = subprocess.check_output(cmd, shell=True).decode('utf-8') except subprocess.CalledProcessError as cpe: print("Error runn command " + cmd) print("Output = " + cpe.output.decode('utf-8')) raise cpe return res class TimeInterval: def __init__(self, start, end): """ TODO: Check times formatting properly here """ self.start = start self.end = end class MovieAdvisorManager: # Put these into a sensible order possible_actions = [ 'help-actions', 'install-bento', 'create-tables', 'import-ratings', 'import-user-info', 'import-movie-info', 'train-item-item-cf', 'register-freshener', ] actions_help = { "install-bento": "Will set up a bento box for you. Assumes that you are in a directory with a tar.gz " "file for the latest bento build. This command will rm -rf your current bento box " "(which is also assumed to be in your current directory).", "create-tables": "Deletes the currently existing user and content tables and recreates them from DDL.", "import-ratings": "Run an express job to import movie ratings", "import-user-info": "Run an express job to import user information", "import-movie-info": "Run an express job to import movie information", "train-item-item-cf": "Calculate item-item similarities", "register-freshener": "Register freshener for scoring function", "help-actions": "Print this help", } jars = ( # 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar', #'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar', 'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar', # Needed on classpath for registering the scoring function and executing it. 'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar' ) # This is the path from movie_advisor_home ddls = ( 'layout/src/main/resources/users.ddl', 'layout/src/main/resources/movies.ddl', ) express_jar = 'express/target/express-1.0-SNAPSHOT.jar' # assert set(actions_help.keys()) == set(possible_actions) def _help_actions(self): """ Print detailed information about how the different actions work """ actions_str = "" for (key, value) in self.actions_help.items():
print(actions_str) sys.exit(0) def _setup_parser(self): """ Add actions for the command-line arguments parser """ parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \ "\n\t".join(self.possible_actions)) # TODO: Detailed help information that prints out all of the available actions and their # assumptions parser.add_argument( "action", nargs='*', help="Action to take") parser.add_argument( '--bento-home', help='Location of bento box', default='kiji-bento-ebi') parser.add_argument( '--bento-tgz', help='Bento TAR file name', default='kiji-bento-ebi-2.0.2-release.tar.gz') parser.add_argument( '--movie-advisor-home', help='Location of checkout of WibiData MovieAdvisor github repo', default='movie-advisor') # Set up dates for training, testing, etc. parser.add_argument( '--train-start-date', default='2013-11-01') parser.add_argument( '--train-end-date', default='2013-11-15') parser.add_argument( '--test-start-date', default='2013-11-16') parser.add_argument( '--test-end-date', default='2013-11-30') parser.add_argument( "--backtest-results-file", default="backtest.txt") parser.add_argument( "--kill-bento", action="store_true", default=False, help="Automatically kill existing BentoBox processes.") parser.add_argument( "--show-classpath", action="store_true", default=False, help="Echo $KIJI_CLASSPATH and exit") return parser def _setup_environment_vars(self, opts): """ Set up useful variables (would be environment vars outside of the script) """ # Check that these directories actually exist assert os.path.isdir(opts.movie_advisor_home) #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home) self.movie_advisor_home = opts.movie_advisor_home self.bento_home = opts.bento_home self.bento_tgz = opts.bento_tgz self.kiji_uri = "kiji://.env/tutorial" # "express job" takes a jar file as an argument assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar)) # Set the classpath for all of the commands that we'll run jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars] for jar in jarsFullPaths: assert os.path.isfile(jar) classpath = ":".join(jarsFullPaths) os.environ['KIJI_CLASSPATH'] = classpath if opts.show_classpath: print("export KIJI_CLASSPATH=%s" % classpath) sys.exit(0) def _parse_options(self, args): """ Parse the command-line options and configure the script appropriately """ parser = self._setup_parser() opts = parser.parse_args(args) self.actions = opts.action for action in self.actions: assert action in self.possible_actions, "Action %s is not a known action for the script" % action self.b_kill_bento = opts.kill_bento if 'help-actions' in self.actions: self._help_actions() self._setup_environment_vars(opts) self.backtest_results_file = opts.backtest_results_file def _exit_if_bento_still_running(self): jps_results = run('jps') if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento: assert False, "Please kill all bento-related jobs (run 'jps' to get a list)" # Kill all of the bento processes for line in jps_results.splitlines(): toks = line.split() if len(toks) == 1: continue assert len(toks) == 2, toks (pid, job) = toks if job == 'Jps': continue cmd = "kill -9 " + pid run(cmd) def _do_action_bento_setup(self): """ Install the BentoBox, install Kiji, etc. 
""" self._exit_if_bento_still_running() cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format( bento_dir=self.bento_home, bento_tar=self.bento_tgz) print(run(cmd)) for command_suffix in ["-env.sh", ""]: kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix) bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix) if not os.path.isfile(kiji_env): assert os.path.isfile(bento_env) cmd = 'cp {bento_env} {kiji_env}'.format( bento_env=bento_env, kiji_env=kiji_env) run(cmd) cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format( bento_dir=self.bento_home, ) print(run(cmd)) assert os.path.isdir(self.bento_home) def _run_express_job(self, class_name, options=""): """ Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying dates, kiji table, etc. """ cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}" cmd = cmd.format( bento_home=self.bento_home, jar=os.path.join(self.movie_advisor_home, self.express_jar), myclass=class_name, kiji_uri=self.kiji_uri, ) + " " + options print(run(cmd)) def _run_kiji_job(self, cmd): cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format( bento_home=self.bento_home, cmd=cmd) print(run(cmd)) def _scan_table(self, uri): """ Scan this table and print out a couple of rows as a sanity check """ cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format( kiji_uri=self.kiji_uri, uri=uri) self._run_kiji_job(cmd) def _do_action_tables_create(self): """ Run the schema shell to create the tables """ schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell") assert os.path.isfile(schema_shell), schema_shell # Delete the table first! cmd = ( "kiji delete --target={kiji_uri} --interactive=false; " + "kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri) self._run_kiji_job(cmd) for ddl in self.ddls: ddl_full_path = os.path.join(self.movie_advisor_home, ddl) assert os.path.isfile(ddl_full_path) cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format( schema_shell=schema_shell, kiji_uri=self.kiji_uri, ddl_full_path=ddl_full_path) self._run_kiji_job(cmd) def _do_action_calculate_similarity_cosine_express(self): """ Run the cosine similarity calculator with the appropriate time range. """ self._run_express_job("com.directv.recommend.express.CosineCFTrainer") self._scan_table("content/item_item_similarities") def _do_action_import_ratings(self): """ Import the movie ratings with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieRatingsImporter", options="--ratings ml-100k/u.data" ) self._scan_table("users") def _do_action_import_user_info(self): """ Import the user metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.UserInfoImporter", options="--user-info ml-100k/u.user" ) self._scan_table("users") def _do_action_import_movie_info(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieInfoImporter", options="--movie-info ml-100k/u.item" ) self._scan_table("movies") def _do_action_train(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.train.ItemSimilarityCalculator" ) self._scan_table("movies") def _do_action_register_freshener(self): """ Register the score function! 
""" policy = "org.kiji.scoring.lib.AlwaysFreshen" score_function = "org.kiji.tutorial.scoring.MovieRecommendationScoreFunction" params = \ '{org.kiji.tutorial.scoring.MovieRecommendationScoreFunction.kvstore_table_uri:"%s/movies/"}' % self.kiji_uri target = self.kiji_uri + "/users/recommendations:foo" self._run_kiji_job( "kiji fresh --do=register --policy-class=%s --score-function-class=%s --parameters='%s' --target=%s" % ( policy, score_function, params, target ) ) def _run_actions(self): """ Run whatever actions the user has specified """ if "install-bento" in self.actions: self._do_action_bento_setup() if "create-tables" in self.actions: self._do_action_tables_create() if "import-ratings" in self.actions: self._do_action_import_ratings() if "import-user-info" in self.actions: self._do_action_import_user_info() if "import-movie-info" in self.actions: self._do_action_import_movie_info() if "train-item-item-cf" in self.actions: self._do_action_train() if "register-freshener" in self.actions: self._do_action_register_freshener() def go(self, args): self._parse_options(args) self._run_actions() if __name__ == "__main__": MovieAdvisorManager().go(sys.argv[1:])
actions_str += "command: %s\n%s\n\n" % (key, value)
conditional_block
manager.py
""" Python script to manage running jobs, checking the results, etc. """ import argparse import os import pprint import subprocess import sys from argparse import RawTextHelpFormatter def run(cmd):
class TimeInterval: def __init__(self, start, end): """ TODO: Check times formatting properly here """ self.start = start self.end = end class MovieAdvisorManager: # Put these into a sensible order possible_actions = [ 'help-actions', 'install-bento', 'create-tables', 'import-ratings', 'import-user-info', 'import-movie-info', 'train-item-item-cf', 'register-freshener', ] actions_help = { "install-bento": "Will set up a bento box for you. Assumes that you are in a directory with a tar.gz " "file for the latest bento build. This command will rm -rf your current bento box " "(which is also assumed to be in your current directory).", "create-tables": "Deletes the currently existing user and content tables and recreates them from DDL.", "import-ratings": "Run an express job to import movie ratings", "import-user-info": "Run an express job to import user information", "import-movie-info": "Run an express job to import movie information", "train-item-item-cf": "Calculate item-item similarities", "register-freshener": "Register freshener for scoring function", "help-actions": "Print this help", } jars = ( # 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar', #'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar', 'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar', # Needed on classpath for registering the scoring function and executing it. 'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar' ) # This is the path from movie_advisor_home ddls = ( 'layout/src/main/resources/users.ddl', 'layout/src/main/resources/movies.ddl', ) express_jar = 'express/target/express-1.0-SNAPSHOT.jar' # assert set(actions_help.keys()) == set(possible_actions) def _help_actions(self): """ Print detailed information about how the different actions work """ actions_str = "" for (key, value) in self.actions_help.items(): actions_str += "command: %s\n%s\n\n" % (key, value) print(actions_str) sys.exit(0) def _setup_parser(self): """ Add actions for the command-line arguments parser """ parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \ "\n\t".join(self.possible_actions)) # TODO: Detailed help information that prints out all of the available actions and their # assumptions parser.add_argument( "action", nargs='*', help="Action to take") parser.add_argument( '--bento-home', help='Location of bento box', default='kiji-bento-ebi') parser.add_argument( '--bento-tgz', help='Bento TAR file name', default='kiji-bento-ebi-2.0.2-release.tar.gz') parser.add_argument( '--movie-advisor-home', help='Location of checkout of WibiData MovieAdvisor github repo', default='movie-advisor') # Set up dates for training, testing, etc. 
parser.add_argument( '--train-start-date', default='2013-11-01') parser.add_argument( '--train-end-date', default='2013-11-15') parser.add_argument( '--test-start-date', default='2013-11-16') parser.add_argument( '--test-end-date', default='2013-11-30') parser.add_argument( "--backtest-results-file", default="backtest.txt") parser.add_argument( "--kill-bento", action="store_true", default=False, help="Automatically kill existing BentoBox processes.") parser.add_argument( "--show-classpath", action="store_true", default=False, help="Echo $KIJI_CLASSPATH and exit") return parser def _setup_environment_vars(self, opts): """ Set up useful variables (would be environment vars outside of the script) """ # Check that these directories actually exist assert os.path.isdir(opts.movie_advisor_home) #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home) self.movie_advisor_home = opts.movie_advisor_home self.bento_home = opts.bento_home self.bento_tgz = opts.bento_tgz self.kiji_uri = "kiji://.env/tutorial" # "express job" takes a jar file as an argument assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar)) # Set the classpath for all of the commands that we'll run jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars] for jar in jarsFullPaths: assert os.path.isfile(jar) classpath = ":".join(jarsFullPaths) os.environ['KIJI_CLASSPATH'] = classpath if opts.show_classpath: print("export KIJI_CLASSPATH=%s" % classpath) sys.exit(0) def _parse_options(self, args): """ Parse the command-line options and configure the script appropriately """ parser = self._setup_parser() opts = parser.parse_args(args) self.actions = opts.action for action in self.actions: assert action in self.possible_actions, "Action %s is not a known action for the script" % action self.b_kill_bento = opts.kill_bento if 'help-actions' in self.actions: self._help_actions() self._setup_environment_vars(opts) self.backtest_results_file = opts.backtest_results_file def _exit_if_bento_still_running(self): jps_results = run('jps') if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento: assert False, "Please kill all bento-related jobs (run 'jps' to get a list)" # Kill all of the bento processes for line in jps_results.splitlines(): toks = line.split() if len(toks) == 1: continue assert len(toks) == 2, toks (pid, job) = toks if job == 'Jps': continue cmd = "kill -9 " + pid run(cmd) def _do_action_bento_setup(self): """ Install the BentoBox, install Kiji, etc. """ self._exit_if_bento_still_running() cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format( bento_dir=self.bento_home, bento_tar=self.bento_tgz) print(run(cmd)) for command_suffix in ["-env.sh", ""]: kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix) bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix) if not os.path.isfile(kiji_env): assert os.path.isfile(bento_env) cmd = 'cp {bento_env} {kiji_env}'.format( bento_env=bento_env, kiji_env=kiji_env) run(cmd) cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format( bento_dir=self.bento_home, ) print(run(cmd)) assert os.path.isdir(self.bento_home) def _run_express_job(self, class_name, options=""): """ Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying dates, kiji table, etc. 
""" cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}" cmd = cmd.format( bento_home=self.bento_home, jar=os.path.join(self.movie_advisor_home, self.express_jar), myclass=class_name, kiji_uri=self.kiji_uri, ) + " " + options print(run(cmd)) def _run_kiji_job(self, cmd): cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format( bento_home=self.bento_home, cmd=cmd) print(run(cmd)) def _scan_table(self, uri): """ Scan this table and print out a couple of rows as a sanity check """ cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format( kiji_uri=self.kiji_uri, uri=uri) self._run_kiji_job(cmd) def _do_action_tables_create(self): """ Run the schema shell to create the tables """ schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell") assert os.path.isfile(schema_shell), schema_shell # Delete the table first! cmd = ( "kiji delete --target={kiji_uri} --interactive=false; " + "kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri) self._run_kiji_job(cmd) for ddl in self.ddls: ddl_full_path = os.path.join(self.movie_advisor_home, ddl) assert os.path.isfile(ddl_full_path) cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format( schema_shell=schema_shell, kiji_uri=self.kiji_uri, ddl_full_path=ddl_full_path) self._run_kiji_job(cmd) def _do_action_calculate_similarity_cosine_express(self): """ Run the cosine similarity calculator with the appropriate time range. """ self._run_express_job("com.directv.recommend.express.CosineCFTrainer") self._scan_table("content/item_item_similarities") def _do_action_import_ratings(self): """ Import the movie ratings with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieRatingsImporter", options="--ratings ml-100k/u.data" ) self._scan_table("users") def _do_action_import_user_info(self): """ Import the user metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.UserInfoImporter", options="--user-info ml-100k/u.user" ) self._scan_table("users") def _do_action_import_movie_info(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieInfoImporter", options="--movie-info ml-100k/u.item" ) self._scan_table("movies") def _do_action_train(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.train.ItemSimilarityCalculator" ) self._scan_table("movies") def _do_action_register_freshener(self): """ Register the score function! 
""" policy = "org.kiji.scoring.lib.AlwaysFreshen" score_function = "org.kiji.tutorial.scoring.MovieRecommendationScoreFunction" params = \ '{org.kiji.tutorial.scoring.MovieRecommendationScoreFunction.kvstore_table_uri:"%s/movies/"}' % self.kiji_uri target = self.kiji_uri + "/users/recommendations:foo" self._run_kiji_job( "kiji fresh --do=register --policy-class=%s --score-function-class=%s --parameters='%s' --target=%s" % ( policy, score_function, params, target ) ) def _run_actions(self): """ Run whatever actions the user has specified """ if "install-bento" in self.actions: self._do_action_bento_setup() if "create-tables" in self.actions: self._do_action_tables_create() if "import-ratings" in self.actions: self._do_action_import_ratings() if "import-user-info" in self.actions: self._do_action_import_user_info() if "import-movie-info" in self.actions: self._do_action_import_movie_info() if "train-item-item-cf" in self.actions: self._do_action_train() if "register-freshener" in self.actions: self._do_action_register_freshener() def go(self, args): self._parse_options(args) self._run_actions() if __name__ == "__main__": MovieAdvisorManager().go(sys.argv[1:])
print(cmd)
try:
    res = subprocess.check_output(cmd, shell=True).decode('utf-8')
except subprocess.CalledProcessError as cpe:
    print("Error running command " + cmd)
    print("Output = " + cpe.output.decode('utf-8'))
    raise cpe
return res
identifier_body
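A minimal sketch of how the `run` helper above is used throughout manager.py: it echoes the shell command, returns decoded stdout, and on failure prints the captured output before re-raising. The command here is illustrative and assumes a JDK's `jps` is on PATH.

# Example call (command is illustrative).
jps_output = run("jps")
for line in jps_output.splitlines():
    print("java process:", line)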
manager.py
""" Python script to manage running jobs, checking the results, etc. """ import argparse import os import pprint import subprocess import sys from argparse import RawTextHelpFormatter def run(cmd): print(cmd) try: res = subprocess.check_output(cmd, shell=True).decode('utf-8') except subprocess.CalledProcessError as cpe: print("Error runn command " + cmd) print("Output = " + cpe.output.decode('utf-8')) raise cpe return res class TimeInterval: def __init__(self, start, end): """ TODO: Check times formatting properly here """ self.start = start self.end = end class MovieAdvisorManager: # Put these into a sensible order possible_actions = [ 'help-actions', 'install-bento', 'create-tables', 'import-ratings', 'import-user-info', 'import-movie-info', 'train-item-item-cf', 'register-freshener', ] actions_help = { "install-bento": "Will set up a bento box for you. Assumes that you are in a directory with a tar.gz " "file for the latest bento build. This command will rm -rf your current bento box " "(which is also assumed to be in your current directory).", "create-tables": "Deletes the currently existing user and content tables and recreates them from DDL.", "import-ratings": "Run an express job to import movie ratings", "import-user-info": "Run an express job to import user information", "import-movie-info": "Run an express job to import movie information", "train-item-item-cf": "Calculate item-item similarities", "register-freshener": "Register freshener for scoring function", "help-actions": "Print this help", } jars = ( # 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar', #'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar', 'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar', # Needed on classpath for registering the scoring function and executing it. 'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar' ) # This is the path from movie_advisor_home ddls = ( 'layout/src/main/resources/users.ddl', 'layout/src/main/resources/movies.ddl', ) express_jar = 'express/target/express-1.0-SNAPSHOT.jar' # assert set(actions_help.keys()) == set(possible_actions) def _help_actions(self): """ Print detailed information about how the different actions work """ actions_str = "" for (key, value) in self.actions_help.items(): actions_str += "command: %s\n%s\n\n" % (key, value) print(actions_str) sys.exit(0) def _setup_parser(self): """ Add actions for the command-line arguments parser """ parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \ "\n\t".join(self.possible_actions)) # TODO: Detailed help information that prints out all of the available actions and their # assumptions parser.add_argument( "action", nargs='*', help="Action to take") parser.add_argument( '--bento-home', help='Location of bento box', default='kiji-bento-ebi') parser.add_argument( '--bento-tgz', help='Bento TAR file name', default='kiji-bento-ebi-2.0.2-release.tar.gz') parser.add_argument( '--movie-advisor-home', help='Location of checkout of WibiData MovieAdvisor github repo', default='movie-advisor') # Set up dates for training, testing, etc. 
parser.add_argument( '--train-start-date', default='2013-11-01') parser.add_argument( '--train-end-date', default='2013-11-15') parser.add_argument( '--test-start-date', default='2013-11-16') parser.add_argument( '--test-end-date', default='2013-11-30') parser.add_argument( "--backtest-results-file", default="backtest.txt") parser.add_argument( "--kill-bento", action="store_true", default=False, help="Automatically kill existing BentoBox processes.") parser.add_argument( "--show-classpath", action="store_true", default=False, help="Echo $KIJI_CLASSPATH and exit") return parser def _setup_environment_vars(self, opts): """ Set up useful variables (would be environment vars outside of the script) """ # Check that these directories actually exist assert os.path.isdir(opts.movie_advisor_home) #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home) self.movie_advisor_home = opts.movie_advisor_home self.bento_home = opts.bento_home self.bento_tgz = opts.bento_tgz self.kiji_uri = "kiji://.env/tutorial" # "express job" takes a jar file as an argument assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar)) # Set the classpath for all of the commands that we'll run jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars] for jar in jarsFullPaths: assert os.path.isfile(jar) classpath = ":".join(jarsFullPaths) os.environ['KIJI_CLASSPATH'] = classpath if opts.show_classpath: print("export KIJI_CLASSPATH=%s" % classpath) sys.exit(0) def _parse_options(self, args): """ Parse the command-line options and configure the script appropriately """ parser = self._setup_parser() opts = parser.parse_args(args) self.actions = opts.action for action in self.actions: assert action in self.possible_actions, "Action %s is not a known action for the script" % action self.b_kill_bento = opts.kill_bento if 'help-actions' in self.actions: self._help_actions() self._setup_environment_vars(opts) self.backtest_results_file = opts.backtest_results_file def _exit_if_bento_still_running(self): jps_results = run('jps') if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento: assert False, "Please kill all bento-related jobs (run 'jps' to get a list)" # Kill all of the bento processes for line in jps_results.splitlines(): toks = line.split() if len(toks) == 1: continue assert len(toks) == 2, toks (pid, job) = toks if job == 'Jps': continue cmd = "kill -9 " + pid run(cmd) def _do_action_bento_setup(self): """ Install the BentoBox, install Kiji, etc. """ self._exit_if_bento_still_running() cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format( bento_dir=self.bento_home, bento_tar=self.bento_tgz) print(run(cmd)) for command_suffix in ["-env.sh", ""]: kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix) bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix) if not os.path.isfile(kiji_env): assert os.path.isfile(bento_env) cmd = 'cp {bento_env} {kiji_env}'.format( bento_env=bento_env, kiji_env=kiji_env) run(cmd) cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format( bento_dir=self.bento_home, ) print(run(cmd)) assert os.path.isdir(self.bento_home) def
(self, class_name, options=""): """ Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying dates, kiji table, etc. """ cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}" cmd = cmd.format( bento_home=self.bento_home, jar=os.path.join(self.movie_advisor_home, self.express_jar), myclass=class_name, kiji_uri=self.kiji_uri, ) + " " + options print(run(cmd)) def _run_kiji_job(self, cmd): cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format( bento_home=self.bento_home, cmd=cmd) print(run(cmd)) def _scan_table(self, uri): """ Scan this table and print out a couple of rows as a sanity check """ cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format( kiji_uri=self.kiji_uri, uri=uri) self._run_kiji_job(cmd) def _do_action_tables_create(self): """ Run the schema shell to create the tables """ schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell") assert os.path.isfile(schema_shell), schema_shell # Delete the table first! cmd = ( "kiji delete --target={kiji_uri} --interactive=false; " + "kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri) self._run_kiji_job(cmd) for ddl in self.ddls: ddl_full_path = os.path.join(self.movie_advisor_home, ddl) assert os.path.isfile(ddl_full_path) cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format( schema_shell=schema_shell, kiji_uri=self.kiji_uri, ddl_full_path=ddl_full_path) self._run_kiji_job(cmd) def _do_action_calculate_similarity_cosine_express(self): """ Run the cosine similarity calculator with the appropriate time range. """ self._run_express_job("com.directv.recommend.express.CosineCFTrainer") self._scan_table("content/item_item_similarities") def _do_action_import_ratings(self): """ Import the movie ratings with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieRatingsImporter", options="--ratings ml-100k/u.data" ) self._scan_table("users") def _do_action_import_user_info(self): """ Import the user metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.UserInfoImporter", options="--user-info ml-100k/u.user" ) self._scan_table("users") def _do_action_import_movie_info(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieInfoImporter", options="--movie-info ml-100k/u.item" ) self._scan_table("movies") def _do_action_train(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.train.ItemSimilarityCalculator" ) self._scan_table("movies") def _do_action_register_freshener(self): """ Register the score function! 
""" policy = "org.kiji.scoring.lib.AlwaysFreshen" score_function = "org.kiji.tutorial.scoring.MovieRecommendationScoreFunction" params = \ '{org.kiji.tutorial.scoring.MovieRecommendationScoreFunction.kvstore_table_uri:"%s/movies/"}' % self.kiji_uri target = self.kiji_uri + "/users/recommendations:foo" self._run_kiji_job( "kiji fresh --do=register --policy-class=%s --score-function-class=%s --parameters='%s' --target=%s" % ( policy, score_function, params, target ) ) def _run_actions(self): """ Run whatever actions the user has specified """ if "install-bento" in self.actions: self._do_action_bento_setup() if "create-tables" in self.actions: self._do_action_tables_create() if "import-ratings" in self.actions: self._do_action_import_ratings() if "import-user-info" in self.actions: self._do_action_import_user_info() if "import-movie-info" in self.actions: self._do_action_import_movie_info() if "train-item-item-cf" in self.actions: self._do_action_train() if "register-freshener" in self.actions: self._do_action_register_freshener() def go(self, args): self._parse_options(args) self._run_actions() if __name__ == "__main__": MovieAdvisorManager().go(sys.argv[1:])
_run_express_job
identifier_name
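A hedged sketch, not in the original script, of a typical programmatic invocation of the manager. The BentoBox and movie-advisor checkouts must exist on disk for the asserts in `_setup_environment_vars` to pass; the paths used are the script's own defaults.

# Hypothetical invocation; paths are the defaults defined in _setup_parser.
mgr = MovieAdvisorManager()
mgr.go(["import-ratings",
        "--bento-home", "kiji-bento-ebi",
        "--movie-advisor-home", "movie-advisor"])
# Internally this dispatches to the Express importer already shown above:
# mgr._run_express_job("org.kiji.tutorial.load.MovieRatingsImporter",
#                      options="--ratings ml-100k/u.data")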
manager.py
""" Python script to manage running jobs, checking the results, etc. """ import argparse import os import pprint import subprocess import sys from argparse import RawTextHelpFormatter def run(cmd): print(cmd) try: res = subprocess.check_output(cmd, shell=True).decode('utf-8') except subprocess.CalledProcessError as cpe: print("Error runn command " + cmd) print("Output = " + cpe.output.decode('utf-8')) raise cpe return res class TimeInterval: def __init__(self, start, end): """ TODO: Check times formatting properly here """ self.start = start self.end = end class MovieAdvisorManager: # Put these into a sensible order possible_actions = [ 'help-actions', 'install-bento', 'create-tables', 'import-ratings', 'import-user-info', 'import-movie-info', 'train-item-item-cf', 'register-freshener', ] actions_help = { "install-bento": "Will set up a bento box for you. Assumes that you are in a directory with a tar.gz " "file for the latest bento build. This command will rm -rf your current bento box " "(which is also assumed to be in your current directory).", "create-tables": "Deletes the currently existing user and content tables and recreates them from DDL.", "import-ratings": "Run an express job to import movie ratings", "import-user-info": "Run an express job to import user information", "import-movie-info": "Run an express job to import movie information", "train-item-item-cf": "Calculate item-item similarities", "register-freshener": "Register freshener for scoring function", "help-actions": "Print this help", } jars = ( # 'train/target/train-1.0-SNAPSHOT-jar-with-dependencies.jar', #'schema/target/schema-1.0-SNAPSHOT-jar-with-dependencies.jar', 'avro/target/movie-advisor-avro-1.0-SNAPSHOT.jar', # Needed on classpath for registering the scoring function and executing it. 'movie-advisor-scoring/target/movie-advisor-scoring-1.0-SNAPSHOT.jar' ) # This is the path from movie_advisor_home ddls = ( 'layout/src/main/resources/users.ddl', 'layout/src/main/resources/movies.ddl', ) express_jar = 'express/target/express-1.0-SNAPSHOT.jar' # assert set(actions_help.keys()) == set(possible_actions) def _help_actions(self): """ Print detailed information about how the different actions work """ actions_str = "" for (key, value) in self.actions_help.items(): actions_str += "command: %s\n%s\n\n" % (key, value) print(actions_str) sys.exit(0) def _setup_parser(self): """ Add actions for the command-line arguments parser """ parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="Manage Kiji stuff for MovieAdvisor. Available actions:\n\t" + \ "\n\t".join(self.possible_actions)) # TODO: Detailed help information that prints out all of the available actions and their # assumptions parser.add_argument( "action", nargs='*', help="Action to take") parser.add_argument( '--bento-home', help='Location of bento box', default='kiji-bento-ebi') parser.add_argument( '--bento-tgz', help='Bento TAR file name', default='kiji-bento-ebi-2.0.2-release.tar.gz') parser.add_argument( '--movie-advisor-home', help='Location of checkout of WibiData MovieAdvisor github repo', default='movie-advisor') # Set up dates for training, testing, etc. 
parser.add_argument( '--train-start-date', default='2013-11-01') parser.add_argument( '--train-end-date', default='2013-11-15') parser.add_argument( '--test-start-date', default='2013-11-16') parser.add_argument( '--test-end-date', default='2013-11-30') parser.add_argument( "--backtest-results-file", default="backtest.txt") parser.add_argument( "--kill-bento", action="store_true", default=False, help="Automatically kill existing BentoBox processes.") parser.add_argument( "--show-classpath", action="store_true", default=False, help="Echo $KIJI_CLASSPATH and exit") return parser def _setup_environment_vars(self, opts): """ Set up useful variables (would be environment vars outside of the script) """
#if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home) self.movie_advisor_home = opts.movie_advisor_home self.bento_home = opts.bento_home self.bento_tgz = opts.bento_tgz self.kiji_uri = "kiji://.env/tutorial" # "express job" takes a jar file as an argument assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar)) # Set the classpath for all of the commands that we'll run jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars] for jar in jarsFullPaths: assert os.path.isfile(jar) classpath = ":".join(jarsFullPaths) os.environ['KIJI_CLASSPATH'] = classpath if opts.show_classpath: print("export KIJI_CLASSPATH=%s" % classpath) sys.exit(0) def _parse_options(self, args): """ Parse the command-line options and configure the script appropriately """ parser = self._setup_parser() opts = parser.parse_args(args) self.actions = opts.action for action in self.actions: assert action in self.possible_actions, "Action %s is not a known action for the script" % action self.b_kill_bento = opts.kill_bento if 'help-actions' in self.actions: self._help_actions() self._setup_environment_vars(opts) self.backtest_results_file = opts.backtest_results_file def _exit_if_bento_still_running(self): jps_results = run('jps') if jps_results.lower().find('minicluster') != -1 and not self.b_kill_bento: assert False, "Please kill all bento-related jobs (run 'jps' to get a list)" # Kill all of the bento processes for line in jps_results.splitlines(): toks = line.split() if len(toks) == 1: continue assert len(toks) == 2, toks (pid, job) = toks if job == 'Jps': continue cmd = "kill -9 " + pid run(cmd) def _do_action_bento_setup(self): """ Install the BentoBox, install Kiji, etc. """ self._exit_if_bento_still_running() cmd = "rm -rf {bento_dir}; tar -zxvf {bento_tar}".format( bento_dir=self.bento_home, bento_tar=self.bento_tgz) print(run(cmd)) for command_suffix in ["-env.sh", ""]: kiji_env = os.path.join(self.bento_home, "bin", "kiji" + command_suffix) bento_env = os.path.join(self.bento_home, "bin", "bento" + command_suffix) if not os.path.isfile(kiji_env): assert os.path.isfile(bento_env) cmd = 'cp {bento_env} {kiji_env}'.format( bento_env=bento_env, kiji_env=kiji_env) run(cmd) cmd = "cd {bento_dir}; source bin/kiji-env.sh; bento start".format( bento_dir=self.bento_home, ) print(run(cmd)) assert os.path.isdir(self.bento_home) def _run_express_job(self, class_name, options=""): """ Run any express job. Handles a lot of boilerplate for all of the Directv jobs (specifying dates, kiji table, etc. """ cmd = "source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}" cmd = cmd.format( bento_home=self.bento_home, jar=os.path.join(self.movie_advisor_home, self.express_jar), myclass=class_name, kiji_uri=self.kiji_uri, ) + " " + options print(run(cmd)) def _run_kiji_job(self, cmd): cmd = "source {bento_home}/bin/kiji-env.sh; {cmd}".format( bento_home=self.bento_home, cmd=cmd) print(run(cmd)) def _scan_table(self, uri): """ Scan this table and print out a couple of rows as a sanity check """ cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format( kiji_uri=self.kiji_uri, uri=uri) self._run_kiji_job(cmd) def _do_action_tables_create(self): """ Run the schema shell to create the tables """ schema_shell = os.path.join(self.bento_home, "schema-shell", "bin", "kiji-schema-shell") assert os.path.isfile(schema_shell), schema_shell # Delete the table first! 
cmd = ( "kiji delete --target={kiji_uri} --interactive=false; " + "kiji install --kiji={kiji_uri}" ).format(kiji_uri=self.kiji_uri) self._run_kiji_job(cmd) for ddl in self.ddls: ddl_full_path = os.path.join(self.movie_advisor_home, ddl) assert os.path.isfile(ddl_full_path) cmd = "{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}".format( schema_shell=schema_shell, kiji_uri=self.kiji_uri, ddl_full_path=ddl_full_path) self._run_kiji_job(cmd) def _do_action_calculate_similarity_cosine_express(self): """ Run the cosine similarity calculator with the appropriate time range. """ self._run_express_job("com.directv.recommend.express.CosineCFTrainer") self._scan_table("content/item_item_similarities") def _do_action_import_ratings(self): """ Import the movie ratings with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieRatingsImporter", options="--ratings ml-100k/u.data" ) self._scan_table("users") def _do_action_import_user_info(self): """ Import the user metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.UserInfoImporter", options="--user-info ml-100k/u.user" ) self._scan_table("users") def _do_action_import_movie_info(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.load.MovieInfoImporter", options="--movie-info ml-100k/u.item" ) self._scan_table("movies") def _do_action_train(self): """ Import the movie metadata with an Express job. """ self._run_express_job( "org.kiji.tutorial.train.ItemSimilarityCalculator" ) self._scan_table("movies") def _do_action_register_freshener(self): """ Register the score function! """ policy = "org.kiji.scoring.lib.AlwaysFreshen" score_function = "org.kiji.tutorial.scoring.MovieRecommendationScoreFunction" params = \ '{org.kiji.tutorial.scoring.MovieRecommendationScoreFunction.kvstore_table_uri:"%s/movies/"}' % self.kiji_uri target = self.kiji_uri + "/users/recommendations:foo" self._run_kiji_job( "kiji fresh --do=register --policy-class=%s --score-function-class=%s --parameters='%s' --target=%s" % ( policy, score_function, params, target ) ) def _run_actions(self): """ Run whatever actions the user has specified """ if "install-bento" in self.actions: self._do_action_bento_setup() if "create-tables" in self.actions: self._do_action_tables_create() if "import-ratings" in self.actions: self._do_action_import_ratings() if "import-user-info" in self.actions: self._do_action_import_user_info() if "import-movie-info" in self.actions: self._do_action_import_movie_info() if "train-item-item-cf" in self.actions: self._do_action_train() if "register-freshener" in self.actions: self._do_action_register_freshener() def go(self, args): self._parse_options(args) self._run_actions() if __name__ == "__main__": MovieAdvisorManager().go(sys.argv[1:])
# Check that these directories actually exist
assert os.path.isdir(opts.movie_advisor_home)
random_line_split
reader_test.go
// Copyright 2018 Northern.tech AS // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package areader import ( "archive/tar" "bytes" "io" "io/ioutil" "os" "testing" "github.com/mendersoftware/mender-artifact/artifact" "github.com/mendersoftware/mender-artifact/awriter" "github.com/mendersoftware/mender-artifact/handlers" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) const ( TestUpdateFileContent = "test update" PublicKey = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAB -----END PUBLIC KEY-----` PrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQDSTLzZ9hQq3yBB+dMDVbKem6iav1J6opg6DICKkQ4M/yhlw32B CGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKcXwaUNml5EhW79AdibBXZiZt8 fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne5vbA+63vRCnrc8QuYwIDAQAB AoGAQKIRELQOsrZsxZowfj/ia9jPUvAmO0apnn2lK/E07k2lbtFMS1H4m1XtGr8F oxQU7rLyyP/FmeJUqJyRXLwsJzma13OpxkQtZmRpL9jEwevnunHYJfceVapQOJ7/ 6Oz0pPWEq39GCn+tTMtgSmkEaSH8Ki9t32g9KuQIKBB2hbECQQDsg7D5fHQB1BXG HJm9JmYYX0Yk6Z2SWBr4mLO0C4hHBnV5qPCLyevInmaCV2cOjDZ5Sz6iF5RK5mw7 qzvFa8ePAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU 73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo= -----END RSA PRIVATE KEY----- ` PublicKeyError = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAC -----END PUBLIC KEY-----` ) func MakeRootfsImageArtifact(version int, signed bool, hasScripts bool) (io.Reader, error) { upd, err := MakeFakeUpdate(TestUpdateFileContent) if err != nil { return nil, err } defer os.Remove(upd) comp := artifact.NewCompressorGzip() art := bytes.NewBuffer(nil) var aw *awriter.Writer if !signed { aw = awriter.NewWriter(art, comp) } else { s := artifact.NewSigner([]byte(PrivateKey)) aw = awriter.NewWriterSigned(art, comp, s) } var u handlers.Composer switch version { case 1: u = handlers.NewRootfsV1(upd, comp) case 2: u = handlers.NewRootfsV2(upd, comp) } scr := artifact.Scripts{} if hasScripts { s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_") if err != nil { return nil, err } defer os.Remove(s.Name()) _, err = io.WriteString(s, "execute me!") if err := scr.Add(s.Name()); err != nil { return nil, err } } updates := &awriter.Updates{U: []handlers.Composer{u}} err = aw.WriteArtifact("mender", version, []string{"vexpress"}, "mender-1.1", updates, &scr) if err != nil { return nil, err } return art, nil } func TestReadArtifact(t *testing.T) { updFileContent := bytes.NewBuffer(nil) copy := func(r io.Reader, f 
*handlers.DataFile) error { _, err := io.Copy(updFileContent, r) return err } rfh := handlers.NewRootfsInstaller() rfh.InstallHandler = copy tc := []struct { version int signed bool handler handlers.Installer verifier artifact.Verifier readError error }{ {1, false, rfh, nil, nil}, {2, false, rfh, nil, nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)), errors.New("reader: invalid signature: crypto/rsa: verification error")}, // // test that we do not need a verifier for signed artifact {2, true, rfh, nil, nil}, } // first create archive, that we will be able to read for _, test := range tc { art, err := MakeRootfsImageArtifact(test.version, test.signed, false) assert.NoError(t, err) aReader := NewReader(art) if test.handler != nil { aReader.RegisterHandler(test.handler) } if test.verifier != nil { aReader.VerifySignatureCallback = test.verifier.Verify } err = aReader.ReadArtifact() if test.readError != nil { assert.Equal(t, test.readError.Error(), err.Error()) continue } assert.NoError(t, err) assert.Equal(t, TestUpdateFileContent, updFileContent.String()) devComp := aReader.GetCompatibleDevices() assert.Len(t, devComp, 1) assert.Equal(t, "vexpress", devComp[0]) if test.handler != nil { assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType()) } assert.Equal(t, "mender-1.1", aReader.GetArtifactName()) // clean the buffer updFileContent.Reset() } } func TestReadSigned(t *testing.T) { art, err := MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader := NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: verify signature callback not registered") art, err = MakeRootfsImageArtifact(2, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact, but no signature file found") art, err = MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader = NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) art, err = MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact") } func TestRegisterMultipleHandlers(t *testing.T) { aReader := NewReader(nil) err := aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.NoError(t, err) err = aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.Error(t, err) err = aReader.RegisterHandler(nil) assert.Error(t, err) } func TestReadNoHandler(t *testing.T) { art, err := MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader := NewReader(art)
err = aReader.ReadArtifact() assert.NoError(t, err) assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType()) } func TestReadBroken(t *testing.T) { broken := []byte("this is broken artifact") buf := bytes.NewBuffer(broken) aReader := NewReader(buf) err := aReader.ReadArtifact() assert.Error(t, err) aReader = NewReader(nil) err = aReader.ReadArtifact() assert.Error(t, err) } func TestReadWithScripts(t *testing.T) { art, err := MakeRootfsImageArtifact(2, false, true) assert.NoError(t, err) aReader := NewReader(art) noExec := 0 aReader.ScriptsReadCallback = func(r io.Reader, info os.FileInfo) error { noExec++ assert.Contains(t, info.Name(), "ArtifactInstall_Enter_10_") buf := bytes.NewBuffer(nil) _, err = io.Copy(buf, r) assert.NoError(t, err) assert.Equal(t, "execute me!", buf.String()) return nil } err = aReader.ReadArtifact() assert.NoError(t, err) assert.Equal(t, 1, noExec) } func MakeFakeUpdate(data string) (string, error) { f, err := ioutil.TempFile("", "test_update") if err != nil { return "", err } defer f.Close() if len(data) > 0 { if _, err := f.WriteString(data); err != nil { return "", err } } return f.Name(), nil } type installer struct { Data *handlers.DataFile } func (i *installer) GetUpdateFiles() [](*handlers.DataFile) { return [](*handlers.DataFile){i.Data} } func (i *installer) GetType() string { return "" } func (i *installer) Copy() handlers.Installer { return i } func (i *installer) ReadHeader(r io.Reader, path string) error { return nil } func (i *installer) Install(r io.Reader, info *os.FileInfo) error { _, err := io.Copy(ioutil.Discard, r) return err } func writeDataFile(t *testing.T, name, data string) io.Reader { comp := artifact.NewCompressorGzip() buf := bytes.NewBuffer(nil) gz, err := comp.NewWriter(buf) assert.NoError(t, err) tw := tar.NewWriter(gz) sw := artifact.NewTarWriterStream(tw) err = sw.Write([]byte(data), name) assert.NoError(t, err) err = tw.Close() assert.NoError(t, err) err = gz.Close() assert.NoError(t, err) return buf } func TestReadAndInstall(t *testing.T) { comp := artifact.NewCompressorGzip() err := readAndInstall(bytes.NewBuffer(nil), nil, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "EOF", errors.Cause(err).Error()) i := &installer{ Data: &handlers.DataFile{ Name: "update.ext4", // this is a calculated checksum of `data` string Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r := writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.NoError(t, err) assert.Equal(t, int64(len("data")), i.GetUpdateFiles()[0].Size) // test missing data file i = &installer{ Data: &handlers.DataFile{ Name: "non-existing", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: can not find data file: update.ext4", errors.Cause(err).Error()) // test missing checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: checksum missing for file: update.ext4", errors.Cause(err).Error()) // test with manifest m := artifact.NewChecksumStore() i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, 
errors.Cause(err).Error(), "checksum missing") // test invalid checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("12121212121212"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "invalid checksum") // test with manifest err = m.Add("update.ext4", []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7")) assert.NoError(t, err) r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") }
random_line_split
reader_test.go
// Copyright 2018 Northern.tech AS // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package areader import ( "archive/tar" "bytes" "io" "io/ioutil" "os" "testing" "github.com/mendersoftware/mender-artifact/artifact" "github.com/mendersoftware/mender-artifact/awriter" "github.com/mendersoftware/mender-artifact/handlers" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) const ( TestUpdateFileContent = "test update" PublicKey = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAB -----END PUBLIC KEY-----` PrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQDSTLzZ9hQq3yBB+dMDVbKem6iav1J6opg6DICKkQ4M/yhlw32B CGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKcXwaUNml5EhW79AdibBXZiZt8 fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne5vbA+63vRCnrc8QuYwIDAQAB AoGAQKIRELQOsrZsxZowfj/ia9jPUvAmO0apnn2lK/E07k2lbtFMS1H4m1XtGr8F oxQU7rLyyP/FmeJUqJyRXLwsJzma13OpxkQtZmRpL9jEwevnunHYJfceVapQOJ7/ 6Oz0pPWEq39GCn+tTMtgSmkEaSH8Ki9t32g9KuQIKBB2hbECQQDsg7D5fHQB1BXG HJm9JmYYX0Yk6Z2SWBr4mLO0C4hHBnV5qPCLyevInmaCV2cOjDZ5Sz6iF5RK5mw7 qzvFa8ePAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU 73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo= -----END RSA PRIVATE KEY----- ` PublicKeyError = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAC -----END PUBLIC KEY-----` ) func MakeRootfsImageArtifact(version int, signed bool, hasScripts bool) (io.Reader, error) { upd, err := MakeFakeUpdate(TestUpdateFileContent) if err != nil { return nil, err } defer os.Remove(upd) comp := artifact.NewCompressorGzip() art := bytes.NewBuffer(nil) var aw *awriter.Writer if !signed { aw = awriter.NewWriter(art, comp) } else { s := artifact.NewSigner([]byte(PrivateKey)) aw = awriter.NewWriterSigned(art, comp, s) } var u handlers.Composer switch version { case 1: u = handlers.NewRootfsV1(upd, comp) case 2: u = handlers.NewRootfsV2(upd, comp) } scr := artifact.Scripts{} if hasScripts { s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_") if err != nil { return nil, err } defer os.Remove(s.Name()) _, err = io.WriteString(s, "execute me!") if err := scr.Add(s.Name()); err != nil { return nil, err } } updates := &awriter.Updates{U: []handlers.Composer{u}} err = aw.WriteArtifact("mender", version, []string{"vexpress"}, "mender-1.1", updates, &scr) if err != nil { return nil, err } return art, nil } func TestReadArtifact(t *testing.T) { updFileContent := bytes.NewBuffer(nil) copy := func(r io.Reader, f 
*handlers.DataFile) error { _, err := io.Copy(updFileContent, r) return err } rfh := handlers.NewRootfsInstaller() rfh.InstallHandler = copy tc := []struct { version int signed bool handler handlers.Installer verifier artifact.Verifier readError error }{ {1, false, rfh, nil, nil}, {2, false, rfh, nil, nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)), errors.New("reader: invalid signature: crypto/rsa: verification error")}, // // test that we do not need a verifier for signed artifact {2, true, rfh, nil, nil}, } // first create archive, that we will be able to read for _, test := range tc { art, err := MakeRootfsImageArtifact(test.version, test.signed, false) assert.NoError(t, err) aReader := NewReader(art) if test.handler != nil { aReader.RegisterHandler(test.handler) } if test.verifier != nil { aReader.VerifySignatureCallback = test.verifier.Verify } err = aReader.ReadArtifact() if test.readError != nil { assert.Equal(t, test.readError.Error(), err.Error()) continue } assert.NoError(t, err) assert.Equal(t, TestUpdateFileContent, updFileContent.String()) devComp := aReader.GetCompatibleDevices() assert.Len(t, devComp, 1) assert.Equal(t, "vexpress", devComp[0]) if test.handler != nil { assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType()) } assert.Equal(t, "mender-1.1", aReader.GetArtifactName()) // clean the buffer updFileContent.Reset() } } func TestReadSigned(t *testing.T) { art, err := MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader := NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: verify signature callback not registered") art, err = MakeRootfsImageArtifact(2, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact, but no signature file found") art, err = MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader = NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) art, err = MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact") } func TestRegisterMultipleHandlers(t *testing.T) { aReader := NewReader(nil) err := aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.NoError(t, err) err = aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.Error(t, err) err = aReader.RegisterHandler(nil) assert.Error(t, err) } func TestReadNoHandler(t *testing.T) { art, err := MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader := NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType()) } func TestReadBroken(t *testing.T) { broken := []byte("this is broken artifact") buf := bytes.NewBuffer(broken) aReader := NewReader(buf) err := aReader.ReadArtifact() assert.Error(t, err) aReader = NewReader(nil) err = aReader.ReadArtifact() assert.Error(t, err) } func TestReadWithScripts(t *testing.T) { art, err := MakeRootfsImageArtifact(2, false, true) assert.NoError(t, err) aReader := NewReader(art) noExec := 0 aReader.ScriptsReadCallback = func(r io.Reader, info os.FileInfo) error { noExec++ assert.Contains(t, info.Name(), 
"ArtifactInstall_Enter_10_") buf := bytes.NewBuffer(nil) _, err = io.Copy(buf, r) assert.NoError(t, err) assert.Equal(t, "execute me!", buf.String()) return nil } err = aReader.ReadArtifact() assert.NoError(t, err) assert.Equal(t, 1, noExec) } func MakeFakeUpdate(data string) (string, error) { f, err := ioutil.TempFile("", "test_update") if err != nil { return "", err } defer f.Close() if len(data) > 0 { if _, err := f.WriteString(data); err != nil { return "", err } } return f.Name(), nil } type installer struct { Data *handlers.DataFile } func (i *installer) GetUpdateFiles() [](*handlers.DataFile) { return [](*handlers.DataFile){i.Data} } func (i *installer) GetType() string { return "" } func (i *installer) Copy() handlers.Installer { return i } func (i *installer) ReadHeader(r io.Reader, path string) error
func (i *installer) Install(r io.Reader, info *os.FileInfo) error { _, err := io.Copy(ioutil.Discard, r) return err } func writeDataFile(t *testing.T, name, data string) io.Reader { comp := artifact.NewCompressorGzip() buf := bytes.NewBuffer(nil) gz, err := comp.NewWriter(buf) assert.NoError(t, err) tw := tar.NewWriter(gz) sw := artifact.NewTarWriterStream(tw) err = sw.Write([]byte(data), name) assert.NoError(t, err) err = tw.Close() assert.NoError(t, err) err = gz.Close() assert.NoError(t, err) return buf } func TestReadAndInstall(t *testing.T) { comp := artifact.NewCompressorGzip() err := readAndInstall(bytes.NewBuffer(nil), nil, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "EOF", errors.Cause(err).Error()) i := &installer{ Data: &handlers.DataFile{ Name: "update.ext4", // this is a calculated checksum of `data` string Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r := writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.NoError(t, err) assert.Equal(t, int64(len("data")), i.GetUpdateFiles()[0].Size) // test missing data file i = &installer{ Data: &handlers.DataFile{ Name: "non-existing", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: can not find data file: update.ext4", errors.Cause(err).Error()) // test missing checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: checksum missing for file: update.ext4", errors.Cause(err).Error()) // test with manifest m := artifact.NewChecksumStore() i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") // test invalid checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("12121212121212"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "invalid checksum") // test with manifest err = m.Add("update.ext4", []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7")) assert.NoError(t, err) r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") }
{ return nil }
identifier_body
reader_test.go
// Copyright 2018 Northern.tech AS // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package areader import ( "archive/tar" "bytes" "io" "io/ioutil" "os" "testing" "github.com/mendersoftware/mender-artifact/artifact" "github.com/mendersoftware/mender-artifact/awriter" "github.com/mendersoftware/mender-artifact/handlers" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) const ( TestUpdateFileContent = "test update" PublicKey = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAB -----END PUBLIC KEY-----` PrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQDSTLzZ9hQq3yBB+dMDVbKem6iav1J6opg6DICKkQ4M/yhlw32B CGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKcXwaUNml5EhW79AdibBXZiZt8 fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne5vbA+63vRCnrc8QuYwIDAQAB AoGAQKIRELQOsrZsxZowfj/ia9jPUvAmO0apnn2lK/E07k2lbtFMS1H4m1XtGr8F oxQU7rLyyP/FmeJUqJyRXLwsJzma13OpxkQtZmRpL9jEwevnunHYJfceVapQOJ7/ 6Oz0pPWEq39GCn+tTMtgSmkEaSH8Ki9t32g9KuQIKBB2hbECQQDsg7D5fHQB1BXG HJm9JmYYX0Yk6Z2SWBr4mLO0C4hHBnV5qPCLyevInmaCV2cOjDZ5Sz6iF5RK5mw7 qzvFa8ePAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU 73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo= -----END RSA PRIVATE KEY----- ` PublicKeyError = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAC -----END PUBLIC KEY-----` ) func MakeRootfsImageArtifact(version int, signed bool, hasScripts bool) (io.Reader, error) { upd, err := MakeFakeUpdate(TestUpdateFileContent) if err != nil { return nil, err } defer os.Remove(upd) comp := artifact.NewCompressorGzip() art := bytes.NewBuffer(nil) var aw *awriter.Writer if !signed { aw = awriter.NewWriter(art, comp) } else { s := artifact.NewSigner([]byte(PrivateKey)) aw = awriter.NewWriterSigned(art, comp, s) } var u handlers.Composer switch version { case 1: u = handlers.NewRootfsV1(upd, comp) case 2: u = handlers.NewRootfsV2(upd, comp) } scr := artifact.Scripts{} if hasScripts { s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_") if err != nil { return nil, err } defer os.Remove(s.Name()) _, err = io.WriteString(s, "execute me!") if err := scr.Add(s.Name()); err != nil { return nil, err } } updates := &awriter.Updates{U: []handlers.Composer{u}} err = aw.WriteArtifact("mender", version, []string{"vexpress"}, "mender-1.1", updates, &scr) if err != nil { return nil, err } return art, nil } func TestReadArtifact(t *testing.T) { updFileContent := bytes.NewBuffer(nil) copy := func(r io.Reader, f 
*handlers.DataFile) error { _, err := io.Copy(updFileContent, r) return err } rfh := handlers.NewRootfsInstaller() rfh.InstallHandler = copy tc := []struct { version int signed bool handler handlers.Installer verifier artifact.Verifier readError error }{ {1, false, rfh, nil, nil}, {2, false, rfh, nil, nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)), errors.New("reader: invalid signature: crypto/rsa: verification error")}, // // test that we do not need a verifier for signed artifact {2, true, rfh, nil, nil}, } // first create archive, that we will be able to read for _, test := range tc { art, err := MakeRootfsImageArtifact(test.version, test.signed, false) assert.NoError(t, err) aReader := NewReader(art) if test.handler != nil { aReader.RegisterHandler(test.handler) } if test.verifier != nil { aReader.VerifySignatureCallback = test.verifier.Verify } err = aReader.ReadArtifact() if test.readError != nil { assert.Equal(t, test.readError.Error(), err.Error()) continue } assert.NoError(t, err) assert.Equal(t, TestUpdateFileContent, updFileContent.String()) devComp := aReader.GetCompatibleDevices() assert.Len(t, devComp, 1) assert.Equal(t, "vexpress", devComp[0]) if test.handler != nil { assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType()) } assert.Equal(t, "mender-1.1", aReader.GetArtifactName()) // clean the buffer updFileContent.Reset() } } func TestReadSigned(t *testing.T) { art, err := MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader := NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: verify signature callback not registered") art, err = MakeRootfsImageArtifact(2, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact, but no signature file found") art, err = MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader = NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) art, err = MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact") } func TestRegisterMultipleHandlers(t *testing.T) { aReader := NewReader(nil) err := aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.NoError(t, err) err = aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.Error(t, err) err = aReader.RegisterHandler(nil) assert.Error(t, err) } func TestReadNoHandler(t *testing.T) { art, err := MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader := NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType()) } func TestReadBroken(t *testing.T) { broken := []byte("this is broken artifact") buf := bytes.NewBuffer(broken) aReader := NewReader(buf) err := aReader.ReadArtifact() assert.Error(t, err) aReader = NewReader(nil) err = aReader.ReadArtifact() assert.Error(t, err) } func TestReadWithScripts(t *testing.T) { art, err := MakeRootfsImageArtifact(2, false, true) assert.NoError(t, err) aReader := NewReader(art) noExec := 0 aReader.ScriptsReadCallback = func(r io.Reader, info os.FileInfo) error { noExec++ assert.Contains(t, info.Name(), 
"ArtifactInstall_Enter_10_") buf := bytes.NewBuffer(nil) _, err = io.Copy(buf, r) assert.NoError(t, err) assert.Equal(t, "execute me!", buf.String()) return nil } err = aReader.ReadArtifact() assert.NoError(t, err) assert.Equal(t, 1, noExec) } func MakeFakeUpdate(data string) (string, error) { f, err := ioutil.TempFile("", "test_update") if err != nil { return "", err } defer f.Close() if len(data) > 0 { if _, err := f.WriteString(data); err != nil { return "", err } } return f.Name(), nil } type installer struct { Data *handlers.DataFile } func (i *installer) GetUpdateFiles() [](*handlers.DataFile) { return [](*handlers.DataFile){i.Data} } func (i *installer)
() string { return "" } func (i *installer) Copy() handlers.Installer { return i } func (i *installer) ReadHeader(r io.Reader, path string) error { return nil } func (i *installer) Install(r io.Reader, info *os.FileInfo) error { _, err := io.Copy(ioutil.Discard, r) return err } func writeDataFile(t *testing.T, name, data string) io.Reader { comp := artifact.NewCompressorGzip() buf := bytes.NewBuffer(nil) gz, err := comp.NewWriter(buf) assert.NoError(t, err) tw := tar.NewWriter(gz) sw := artifact.NewTarWriterStream(tw) err = sw.Write([]byte(data), name) assert.NoError(t, err) err = tw.Close() assert.NoError(t, err) err = gz.Close() assert.NoError(t, err) return buf } func TestReadAndInstall(t *testing.T) { comp := artifact.NewCompressorGzip() err := readAndInstall(bytes.NewBuffer(nil), nil, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "EOF", errors.Cause(err).Error()) i := &installer{ Data: &handlers.DataFile{ Name: "update.ext4", // this is a calculated checksum of `data` string Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r := writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.NoError(t, err) assert.Equal(t, int64(len("data")), i.GetUpdateFiles()[0].Size) // test missing data file i = &installer{ Data: &handlers.DataFile{ Name: "non-existing", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: can not find data file: update.ext4", errors.Cause(err).Error()) // test missing checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: checksum missing for file: update.ext4", errors.Cause(err).Error()) // test with manifest m := artifact.NewChecksumStore() i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") // test invalid checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("12121212121212"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "invalid checksum") // test with manifest err = m.Add("update.ext4", []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7")) assert.NoError(t, err) r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") }
GetType
identifier_name
reader_test.go
// Copyright 2018 Northern.tech AS // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package areader import ( "archive/tar" "bytes" "io" "io/ioutil" "os" "testing" "github.com/mendersoftware/mender-artifact/artifact" "github.com/mendersoftware/mender-artifact/awriter" "github.com/mendersoftware/mender-artifact/handlers" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) const ( TestUpdateFileContent = "test update" PublicKey = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAB -----END PUBLIC KEY-----` PrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQDSTLzZ9hQq3yBB+dMDVbKem6iav1J6opg6DICKkQ4M/yhlw32B CGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKcXwaUNml5EhW79AdibBXZiZt8 fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne5vbA+63vRCnrc8QuYwIDAQAB AoGAQKIRELQOsrZsxZowfj/ia9jPUvAmO0apnn2lK/E07k2lbtFMS1H4m1XtGr8F oxQU7rLyyP/FmeJUqJyRXLwsJzma13OpxkQtZmRpL9jEwevnunHYJfceVapQOJ7/ 6Oz0pPWEq39GCn+tTMtgSmkEaSH8Ki9t32g9KuQIKBB2hbECQQDsg7D5fHQB1BXG HJm9JmYYX0Yk6Z2SWBr4mLO0C4hHBnV5qPCLyevInmaCV2cOjDZ5Sz6iF5RK5mw7 qzvFa8ePAkEA46Anom3cNXO5pjfDmn2CoqUvMeyrJUFL5aU6W1S6iFprZ/YwdHcC kS5yTngwVOmcnT65Vnycygn+tZan2A0h7QJBAJNlowZovDdjgEpeCqXp51irD6Dz gsLwa6agK+Y6Ba0V5mJyma7UoT//D62NYOmdElnXPepwvXdMUQmCtpZbjBsCQD5H VHDJlCV/yzyiJz9+tZ5giaAkO9NOoUBsy6GvdfXWn2prXmiPI0GrrpSvp7Gj1Tjk r3rtT0ysHWd7l+Kx/SUCQGlitd5RDfdHl+gKrCwhNnRG7FzRLv5YOQV81+kh7SkU 73TXPIqLESVrqWKDfLwfsfEpV248MSRou+y0O1mtFpo= -----END RSA PRIVATE KEY----- ` PublicKeyError = `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSTLzZ9hQq3yBB+dMDVbKem6ia v1J6opg6DICKkQ4M/yhlw32BCGm2ArM3VwQRgq6Q1sNSq953n5c1EO3Xcy/qTAKc XwaUNml5EhW79AdibBXZiZt8fMhCjUd/4ce3rLNjnbIn1o9L6pzV4CcVJ8+iNhne 5vbA+63vRCnrc8QuYwIDAQAC -----END PUBLIC KEY-----` ) func MakeRootfsImageArtifact(version int, signed bool, hasScripts bool) (io.Reader, error) { upd, err := MakeFakeUpdate(TestUpdateFileContent) if err != nil { return nil, err } defer os.Remove(upd) comp := artifact.NewCompressorGzip() art := bytes.NewBuffer(nil) var aw *awriter.Writer if !signed { aw = awriter.NewWriter(art, comp) } else { s := artifact.NewSigner([]byte(PrivateKey)) aw = awriter.NewWriterSigned(art, comp, s) } var u handlers.Composer switch version { case 1: u = handlers.NewRootfsV1(upd, comp) case 2: u = handlers.NewRootfsV2(upd, comp) } scr := artifact.Scripts{} if hasScripts { s, err := ioutil.TempFile("", "ArtifactInstall_Enter_10_") if err != nil { return nil, err } defer os.Remove(s.Name()) _, err = io.WriteString(s, "execute me!") if err := scr.Add(s.Name()); err != nil { return nil, err } } updates := &awriter.Updates{U: []handlers.Composer{u}} err = aw.WriteArtifact("mender", version, []string{"vexpress"}, "mender-1.1", updates, &scr) if err != nil { return nil, err } return art, nil } func TestReadArtifact(t *testing.T) { updFileContent := bytes.NewBuffer(nil) copy := func(r io.Reader, f 
*handlers.DataFile) error { _, err := io.Copy(updFileContent, r) return err } rfh := handlers.NewRootfsInstaller() rfh.InstallHandler = copy tc := []struct { version int signed bool handler handlers.Installer verifier artifact.Verifier readError error }{ {1, false, rfh, nil, nil}, {2, false, rfh, nil, nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKey)), nil}, {2, true, rfh, artifact.NewVerifier([]byte(PublicKeyError)), errors.New("reader: invalid signature: crypto/rsa: verification error")}, // // test that we do not need a verifier for signed artifact {2, true, rfh, nil, nil}, } // first create archive, that we will be able to read for _, test := range tc { art, err := MakeRootfsImageArtifact(test.version, test.signed, false) assert.NoError(t, err) aReader := NewReader(art) if test.handler != nil { aReader.RegisterHandler(test.handler) } if test.verifier != nil { aReader.VerifySignatureCallback = test.verifier.Verify } err = aReader.ReadArtifact() if test.readError != nil
assert.NoError(t, err) assert.Equal(t, TestUpdateFileContent, updFileContent.String()) devComp := aReader.GetCompatibleDevices() assert.Len(t, devComp, 1) assert.Equal(t, "vexpress", devComp[0]) if test.handler != nil { assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, test.handler.GetType(), aReader.GetHandlers()[0].GetType()) } assert.Equal(t, "mender-1.1", aReader.GetArtifactName()) // clean the buffer updFileContent.Reset() } } func TestReadSigned(t *testing.T) { art, err := MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader := NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: verify signature callback not registered") art, err = MakeRootfsImageArtifact(2, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact, but no signature file found") art, err = MakeRootfsImageArtifact(2, true, false) assert.NoError(t, err) aReader = NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) art, err = MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader = NewReaderSigned(art) err = aReader.ReadArtifact() assert.Error(t, err) assert.Contains(t, err.Error(), "reader: expecting signed artifact") } func TestRegisterMultipleHandlers(t *testing.T) { aReader := NewReader(nil) err := aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.NoError(t, err) err = aReader.RegisterHandler(handlers.NewRootfsInstaller()) assert.Error(t, err) err = aReader.RegisterHandler(nil) assert.Error(t, err) } func TestReadNoHandler(t *testing.T) { art, err := MakeRootfsImageArtifact(1, false, false) assert.NoError(t, err) aReader := NewReader(art) err = aReader.ReadArtifact() assert.NoError(t, err) assert.Len(t, aReader.GetHandlers(), 1) assert.Equal(t, "rootfs-image", aReader.GetHandlers()[0].GetType()) } func TestReadBroken(t *testing.T) { broken := []byte("this is broken artifact") buf := bytes.NewBuffer(broken) aReader := NewReader(buf) err := aReader.ReadArtifact() assert.Error(t, err) aReader = NewReader(nil) err = aReader.ReadArtifact() assert.Error(t, err) } func TestReadWithScripts(t *testing.T) { art, err := MakeRootfsImageArtifact(2, false, true) assert.NoError(t, err) aReader := NewReader(art) noExec := 0 aReader.ScriptsReadCallback = func(r io.Reader, info os.FileInfo) error { noExec++ assert.Contains(t, info.Name(), "ArtifactInstall_Enter_10_") buf := bytes.NewBuffer(nil) _, err = io.Copy(buf, r) assert.NoError(t, err) assert.Equal(t, "execute me!", buf.String()) return nil } err = aReader.ReadArtifact() assert.NoError(t, err) assert.Equal(t, 1, noExec) } func MakeFakeUpdate(data string) (string, error) { f, err := ioutil.TempFile("", "test_update") if err != nil { return "", err } defer f.Close() if len(data) > 0 { if _, err := f.WriteString(data); err != nil { return "", err } } return f.Name(), nil } type installer struct { Data *handlers.DataFile } func (i *installer) GetUpdateFiles() [](*handlers.DataFile) { return [](*handlers.DataFile){i.Data} } func (i *installer) GetType() string { return "" } func (i *installer) Copy() handlers.Installer { return i } func (i *installer) ReadHeader(r io.Reader, path string) error { return nil } func (i *installer) Install(r io.Reader, info *os.FileInfo) error { _, err := io.Copy(ioutil.Discard, r) return err } func writeDataFile(t *testing.T, name, data string) io.Reader { comp := artifact.NewCompressorGzip() buf := 
bytes.NewBuffer(nil) gz, err := comp.NewWriter(buf) assert.NoError(t, err) tw := tar.NewWriter(gz) sw := artifact.NewTarWriterStream(tw) err = sw.Write([]byte(data), name) assert.NoError(t, err) err = tw.Close() assert.NoError(t, err) err = gz.Close() assert.NoError(t, err) return buf } func TestReadAndInstall(t *testing.T) { comp := artifact.NewCompressorGzip() err := readAndInstall(bytes.NewBuffer(nil), nil, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "EOF", errors.Cause(err).Error()) i := &installer{ Data: &handlers.DataFile{ Name: "update.ext4", // this is a calculated checksum of `data` string Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r := writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.NoError(t, err) assert.Equal(t, int64(len("data")), i.GetUpdateFiles()[0].Size) // test missing data file i = &installer{ Data: &handlers.DataFile{ Name: "non-existing", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: can not find data file: update.ext4", errors.Cause(err).Error()) // test missing checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Equal(t, "update: checksum missing for file: update.ext4", errors.Cause(err).Error()) // test with manifest m := artifact.NewChecksumStore() i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") // test invalid checksum i = &installer{ Data: &handlers.DataFile{ Name: "update.ext4", Checksum: []byte("12121212121212"), }, } r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, nil, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "invalid checksum") // test with manifest err = m.Add("update.ext4", []byte("3a6eb0790f39ac87c94f3856b2dd2c5d110e6811602261a9a923d3bb23adc8b7")) assert.NoError(t, err) r = writeDataFile(t, "update.ext4", "data") err = readAndInstall(r, i, m, 1, comp) assert.Error(t, err) assert.Contains(t, errors.Cause(err).Error(), "checksum missing") }
{
    assert.Equal(t, test.readError.Error(), err.Error())
    continue
}
conditional_block
analysisPlots_TWZ_nLep.py
#!/usr/bin/env python ''' Analysis script for standard plots ''' # # Standard imports and batch mode # import ROOT, os ROOT.gROOT.SetBatch(True) import itertools from math import sqrt, cos, sin, pi, acos, cosh from RootTools.core.standard import * from TopEFT.Tools.user import plot_directory from TopEFT.Tools.helpers import deltaPhi, getObjDict, getVarValue, deltaR, deltaR2 from TopEFT.Tools.objectSelection import getFilterCut from TopEFT.Tools.cutInterpreter import cutInterpreter from TopEFT.Tools.triggerSelector import triggerSelector from TopEFT.samples.color import color # for mt2ll from TopEFT.Tools.mt2Calculator import mt2Calculator mt2Calc = mt2Calculator() # # Arguments # import argparse argParser = argparse.ArgumentParser(description = "Argument parser") argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging") argParser.add_argument('--noData', action='store_true', default=True, help='also plot data?') argParser.add_argument('--small', action='store_true', help='Run only on a small subset of the data?', ) argParser.add_argument('--plot_directory', action='store', default='Histo_Integral')#'analysisPlots_4l') argParser.add_argument('--selection', action='store', default='None')#'quadlepTWZ-onZ1-noZ2') # quadlep-lepSelQuad-njet2p-btag0p-onZ1-offZ2 or quadlep-lepSelQuad-njet2p-btag1p-onZ1-offZ2 for signal regions argParser.add_argument('--normalize', action='store_true', default=False, help="Normalize yields" ) argParser.add_argument('--year', action='store', default=2016, type=int, help="Which year?" ) args = argParser.parse_args() # PU reweighting on the fly from TopEFT.Tools.puProfileCache import puProfile from TopEFT.Tools.puReweighting import getReweightingFunction from TopEFT.samples.helpers import fromHeppySample # # Logger # import TopEFT.Tools.logger as logger import RootTools.core.logger as logger_rt logger = logger.get_logger( args.logLevel, logFile = None) logger_rt = logger_rt.get_logger(args.logLevel, logFile = None) if args.small: args.plot_directory += "_small" if args.noData: args.plot_directory += "_noData" if args.normalize: args.plot_directory += "_normalize" # # Make samples, will be searched for in the postProcessing directory # if args.year == 2016: data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import * data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import * data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/" signals = [] # # Text on the plots # def drawObjects( plotData, dataMCScale, lumi_scale ): tex = ROOT.TLatex() tex.SetNDC() tex.SetTextSize(0.04) tex.SetTextAlign(11) # align right lines = [ (0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'), (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale) ] return [tex.DrawLatex(*l) for l in lines] scaling = { i+1:0 for i in range(len(signals)) } def drawPlots(plots, mode, dataMCScale): for log in [False, True]: plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection) for plot in plots: if not max(l[0].GetMaximum() 
for l in plot.histos): continue # Empty plot if not args.noData: if mode == "all": plot.histos[1][0].legendText = "Data" if mode == "SF": plot.histos[1][0].legendText = "Data (SF)" extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png'] plotting.draw(plot, plot_directory = plot_directory_, extensions = extensions_, ratio = {'yRange':(0.1,1.9)} if not args.noData else None, logX = False, logY = log, sorting = True, yRange = (0.03, "auto") if log else (0.001, "auto"), scaling = scaling if args.normalize else {}, legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2], drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ), copyIndexPHP = True, ) def getLeptonSelection( mode ): # if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1" if mode =="nLep": return "(1)" # # Read variables and sequences # read_variables = ["weight/F", "jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I", "lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I", "met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I", "Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I", "Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F", "Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I", ] sequence = [] def getLooseLeptonMult( event, sample ): leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))] lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ] event.nLepLoose = len(lepLoose) sequence.append( getLooseLeptonMult ) # # Loop over channels # yields = {} allPlots = {} allModes = ['nLep'] for index, mode in enumerate(allModes): yields[mode] = {} logger.info("Working on mode %s", mode) if not args.noData: data_sample = Run2016 if args.year == 2016 else Run2017 data_sample.texName = "data" data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)]) data_sample.name = "data" data_sample.read_variables = ["evt/I","run/I"] data_sample.style = styles.errorStyle(ROOT.kBlack) lumi_scale = data_sample.lumi/1000 if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0 weight_ = lambda event, sample: event.weight lumi_scale = 300 TTZ_mc = TTZtoLLNuNu if args.year == 2016: mc = [ yt_TWW ] # mc = [ dim6top_TTW ] # mc = [ TWZ ] # mc = [ yt_TWZ ] # mc = [ yt_TWZ_filter, yt_TWZ ] # mc = [ yt_TWZ_filter ] for sample in mc: sample.style = styles.fillStyle(sample.color) for sample in mc + signals: sample.scale = lumi_scale #sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F'] sample.weight = lambda event, sample: 1 # if args.year == 2016: # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # else: # sample.weight = lambda event, 
sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # tr = triggerSelector(args.year) sample.setSelectionString(getLeptonSelection(mode)) #sample.setSelectionString([getFilterCut(isData=False, year=args.year), getLeptonSelection(mode), tr.getSelection("MC")]) if not args.noData: stack = Stack(mc, data_sample) else:
stack.extend( [ [s] for s in signals ] ) if args.small: for sample in stack.samples: sample.reduceFiles( to = 1 ) # Use some defaults Plot.setDefaults(stack = stack, weight = staticmethod(weight_), selectionString = cutInterpreter.cutString(args.selection), addOverFlowBin='both') plots = [] plots.append(Plot( texX = 'N_{l, loose}', texY = 'Number of Events', name = 'nLepLoose', attribute = lambda event, sample: event.nlep, #binning=[7,0.5,7.5], binning=[8,-0.5,7.5], )) plotting.fill(plots, read_variables = read_variables, sequence = sequence) if args.noData: yields[mode]["data"] = 0 # noData dataMCScale = 0 for plot in plots: for i, l in enumerate(plot.histos): for j, h in enumerate(l): test = h.Integral(4,6) print "INTEGRAL", test # print h.GetXaxis().SetBinLabel(3,"test") drawPlots(plots, mode, dataMCScale) allPlots[mode] = plots exit() # Add the different channels into SF and all for mode in ["comb1","comb2","comb3","all"]: yields[mode] = {} for y in yields[allModes[0]]: try: yields[mode][y] = sum(yields[c][y] for c in ['eeee','mueee','mumuee', 'mumumue', 'mumumumu']) except: yields[mode][y] = 0 dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan') for plot in allPlots['mumumumu']: if mode=="comb1": tmp = allPlots['mumumue'] elif mode=="comb2": tmp = allPlots['mumuee'] elif mode=="comb3": tmp = allPlots['mueee'] else: tmp = allPlots['eeee'] for plot2 in (p for p in tmp if p.name == plot.name): for i, j in enumerate(list(itertools.chain.from_iterable(plot.histos))): for k, l in enumerate(list(itertools.chain.from_iterable(plot2.histos))): if i==k: j.Add(l) if mode == "all": drawPlots(allPlots['mumumumu'], mode, dataMCScale) logger.info( "Done with prefix %s and selectionString %s", args.selection, cutInterpreter.cutString(args.selection) )
stack = Stack(mc)
conditional_block
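For orientation, the h.Integral(4,6) printed in the plot loop above sums bins 4 through 6 of the nLepLoose histogram; with the binning [8,-0.5,7.5] used in the script, those bins cover the integer values 3, 4 and 5. A minimal standalone sketch of what that call returns (hypothetical fill values, plain PyROOT, not part of the analysis script):

import ROOT

# same binning as the nLepLoose plot: 8 bins from -0.5 to 7.5, one bin per integer value
h = ROOT.TH1F("nLepLoose", "N_{l, loose}", 8, -0.5, 7.5)
for n in [2, 3, 3, 4, 5, 6]:   # hypothetical lepton multiplicities
    h.Fill(n)

# bin 4 covers [2.5, 3.5) and bin 6 covers [4.5, 5.5), so this counts events with 3-5 leptons
print(h.Integral(4, 6))        # -> 4.0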
analysisPlots_TWZ_nLep.py
#!/usr/bin/env python ''' Analysis script for standard plots ''' # # Standard imports and batch mode # import ROOT, os ROOT.gROOT.SetBatch(True) import itertools from math import sqrt, cos, sin, pi, acos, cosh from RootTools.core.standard import * from TopEFT.Tools.user import plot_directory from TopEFT.Tools.helpers import deltaPhi, getObjDict, getVarValue, deltaR, deltaR2 from TopEFT.Tools.objectSelection import getFilterCut from TopEFT.Tools.cutInterpreter import cutInterpreter from TopEFT.Tools.triggerSelector import triggerSelector from TopEFT.samples.color import color # for mt2ll from TopEFT.Tools.mt2Calculator import mt2Calculator mt2Calc = mt2Calculator() # # Arguments # import argparse argParser = argparse.ArgumentParser(description = "Argument parser") argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging") argParser.add_argument('--noData', action='store_true', default=True, help='also plot data?') argParser.add_argument('--small', action='store_true', help='Run only on a small subset of the data?', ) argParser.add_argument('--plot_directory', action='store', default='Histo_Integral')#'analysisPlots_4l') argParser.add_argument('--selection', action='store', default='None')#'quadlepTWZ-onZ1-noZ2') # quadlep-lepSelQuad-njet2p-btag0p-onZ1-offZ2 or quadlep-lepSelQuad-njet2p-btag1p-onZ1-offZ2 for signal regions argParser.add_argument('--normalize', action='store_true', default=False, help="Normalize yields" ) argParser.add_argument('--year', action='store', default=2016, type=int, help="Which year?" ) args = argParser.parse_args() # PU reweighting on the fly from TopEFT.Tools.puProfileCache import puProfile from TopEFT.Tools.puReweighting import getReweightingFunction from TopEFT.samples.helpers import fromHeppySample # # Logger # import TopEFT.Tools.logger as logger import RootTools.core.logger as logger_rt logger = logger.get_logger( args.logLevel, logFile = None) logger_rt = logger_rt.get_logger(args.logLevel, logFile = None) if args.small: args.plot_directory += "_small" if args.noData: args.plot_directory += "_noData" if args.normalize: args.plot_directory += "_normalize" # # Make samples, will be searched for in the postProcessing directory # if args.year == 2016: data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import * data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import * data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/" signals = [] # # Text on the plots # def
( plotData, dataMCScale, lumi_scale ): tex = ROOT.TLatex() tex.SetNDC() tex.SetTextSize(0.04) tex.SetTextAlign(11) # align right lines = [ (0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'), (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale) ] return [tex.DrawLatex(*l) for l in lines] scaling = { i+1:0 for i in range(len(signals)) } def drawPlots(plots, mode, dataMCScale): for log in [False, True]: plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection) for plot in plots: if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot if not args.noData: if mode == "all": plot.histos[1][0].legendText = "Data" if mode == "SF": plot.histos[1][0].legendText = "Data (SF)" extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png'] plotting.draw(plot, plot_directory = plot_directory_, extensions = extensions_, ratio = {'yRange':(0.1,1.9)} if not args.noData else None, logX = False, logY = log, sorting = True, yRange = (0.03, "auto") if log else (0.001, "auto"), scaling = scaling if args.normalize else {}, legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2], drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ), copyIndexPHP = True, ) def getLeptonSelection( mode ): # if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1" if mode =="nLep": return "(1)" # # Read variables and sequences # read_variables = ["weight/F", "jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I", "lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I", "met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I", "Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I", "Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F", "Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I", ] sequence = [] def getLooseLeptonMult( event, sample ): leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))] lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ] event.nLepLoose = len(lepLoose) sequence.append( getLooseLeptonMult ) # # Loop over channels # yields = {} allPlots = {} allModes = ['nLep'] for index, mode in enumerate(allModes): yields[mode] = {} logger.info("Working on mode %s", mode) if not args.noData: data_sample = Run2016 if args.year == 2016 else Run2017 data_sample.texName = "data" data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)]) data_sample.name = "data" data_sample.read_variables = ["evt/I","run/I"] data_sample.style = styles.errorStyle(ROOT.kBlack) lumi_scale = data_sample.lumi/1000 if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0 weight_ = lambda event, sample: event.weight lumi_scale = 300 TTZ_mc = TTZtoLLNuNu if args.year == 2016: mc = [ yt_TWW ] # mc = [ dim6top_TTW ] # mc = [ TWZ ] # mc = [ yt_TWZ ] # mc = [ yt_TWZ_filter, yt_TWZ ] # mc = [ yt_TWZ_filter ] for sample 
in mc: sample.style = styles.fillStyle(sample.color) for sample in mc + signals: sample.scale = lumi_scale #sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F'] sample.weight = lambda event, sample: 1 # if args.year == 2016: # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # else: # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # tr = triggerSelector(args.year) sample.setSelectionString(getLeptonSelection(mode)) #sample.setSelectionString([getFilterCut(isData=False, year=args.year), getLeptonSelection(mode), tr.getSelection("MC")]) if not args.noData: stack = Stack(mc, data_sample) else: stack = Stack(mc) stack.extend( [ [s] for s in signals ] ) if args.small: for sample in stack.samples: sample.reduceFiles( to = 1 ) # Use some defaults Plot.setDefaults(stack = stack, weight = staticmethod(weight_), selectionString = cutInterpreter.cutString(args.selection), addOverFlowBin='both') plots = [] plots.append(Plot( texX = 'N_{l, loose}', texY = 'Number of Events', name = 'nLepLoose', attribute = lambda event, sample: event.nlep, #binning=[7,0.5,7.5], binning=[8,-0.5,7.5], )) plotting.fill(plots, read_variables = read_variables, sequence = sequence) if args.noData: yields[mode]["data"] = 0 # noData dataMCScale = 0 for plot in plots: for i, l in enumerate(plot.histos): for j, h in enumerate(l): test = h.Integral(4,6) print "INTEGRAL", test # print h.GetXaxis().SetBinLabel(3,"test") drawPlots(plots, mode, dataMCScale) allPlots[mode] = plots exit() # Add the different channels into SF and all for mode in ["comb1","comb2","comb3","all"]: yields[mode] = {} for y in yields[allModes[0]]: try: yields[mode][y] = sum(yields[c][y] for c in ['eeee','mueee','mumuee', 'mumumue', 'mumumumu']) except: yields[mode][y] = 0 dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan') for plot in allPlots['mumumumu']: if mode=="comb1": tmp = allPlots['mumumue'] elif mode=="comb2": tmp = allPlots['mumuee'] elif mode=="comb3": tmp = allPlots['mueee'] else: tmp = allPlots['eeee'] for plot2 in (p for p in tmp if p.name == plot.name): for i, j in enumerate(list(itertools.chain.from_iterable(plot.histos))): for k, l in enumerate(list(itertools.chain.from_iterable(plot2.histos))): if i==k: j.Add(l) if mode == "all": drawPlots(allPlots['mumumumu'], mode, dataMCScale) logger.info( "Done with prefix %s and selectionString %s", args.selection, cutInterpreter.cutString(args.selection) )
drawObjects
identifier_name
analysisPlots_TWZ_nLep.py
#!/usr/bin/env python ''' Analysis script for standard plots ''' # # Standard imports and batch mode # import ROOT, os ROOT.gROOT.SetBatch(True) import itertools from math import sqrt, cos, sin, pi, acos, cosh from RootTools.core.standard import * from TopEFT.Tools.user import plot_directory from TopEFT.Tools.helpers import deltaPhi, getObjDict, getVarValue, deltaR, deltaR2 from TopEFT.Tools.objectSelection import getFilterCut from TopEFT.Tools.cutInterpreter import cutInterpreter from TopEFT.Tools.triggerSelector import triggerSelector from TopEFT.samples.color import color # for mt2ll from TopEFT.Tools.mt2Calculator import mt2Calculator mt2Calc = mt2Calculator() # # Arguments # import argparse argParser = argparse.ArgumentParser(description = "Argument parser") argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging") argParser.add_argument('--noData', action='store_true', default=True, help='also plot data?') argParser.add_argument('--small', action='store_true', help='Run only on a small subset of the data?', ) argParser.add_argument('--plot_directory', action='store', default='Histo_Integral')#'analysisPlots_4l') argParser.add_argument('--selection', action='store', default='None')#'quadlepTWZ-onZ1-noZ2') # quadlep-lepSelQuad-njet2p-btag0p-onZ1-offZ2 or quadlep-lepSelQuad-njet2p-btag1p-onZ1-offZ2 for signal regions argParser.add_argument('--normalize', action='store_true', default=False, help="Normalize yields" ) argParser.add_argument('--year', action='store', default=2016, type=int, help="Which year?" ) args = argParser.parse_args() # PU reweighting on the fly from TopEFT.Tools.puProfileCache import puProfile from TopEFT.Tools.puReweighting import getReweightingFunction from TopEFT.samples.helpers import fromHeppySample # # Logger # import TopEFT.Tools.logger as logger import RootTools.core.logger as logger_rt logger = logger.get_logger( args.logLevel, logFile = None) logger_rt = logger_rt.get_logger(args.logLevel, logFile = None) if args.small: args.plot_directory += "_small" if args.noData: args.plot_directory += "_noData" if args.normalize: args.plot_directory += "_normalize" # # Make samples, will be searched for in the postProcessing directory # if args.year == 2016: data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import * data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import * data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/" signals = [] # # Text on the plots # def drawObjects( plotData, dataMCScale, lumi_scale ): tex = ROOT.TLatex() tex.SetNDC() tex.SetTextSize(0.04) tex.SetTextAlign(11) # align right lines = [ (0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'), (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale) ] return [tex.DrawLatex(*l) for l in lines] scaling = { i+1:0 for i in range(len(signals)) } def drawPlots(plots, mode, dataMCScale): for log in [False, True]: plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection) for plot in plots: if not max(l[0].GetMaximum() 
for l in plot.histos): continue # Empty plot if not args.noData: if mode == "all": plot.histos[1][0].legendText = "Data" if mode == "SF": plot.histos[1][0].legendText = "Data (SF)" extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png'] plotting.draw(plot, plot_directory = plot_directory_, extensions = extensions_, ratio = {'yRange':(0.1,1.9)} if not args.noData else None, logX = False, logY = log, sorting = True, yRange = (0.03, "auto") if log else (0.001, "auto"), scaling = scaling if args.normalize else {}, legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2], drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ), copyIndexPHP = True, ) def getLeptonSelection( mode ): # if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1" if mode =="nLep": return "(1)" # # Read variables and sequences # read_variables = ["weight/F", "jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I", "lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I", "met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I", "Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I", "Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F", "Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I", ] sequence = [] def getLooseLeptonMult( event, sample ):
sequence.append( getLooseLeptonMult ) # # Loop over channels # yields = {} allPlots = {} allModes = ['nLep'] for index, mode in enumerate(allModes): yields[mode] = {} logger.info("Working on mode %s", mode) if not args.noData: data_sample = Run2016 if args.year == 2016 else Run2017 data_sample.texName = "data" data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)]) data_sample.name = "data" data_sample.read_variables = ["evt/I","run/I"] data_sample.style = styles.errorStyle(ROOT.kBlack) lumi_scale = data_sample.lumi/1000 if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0 weight_ = lambda event, sample: event.weight lumi_scale = 300 TTZ_mc = TTZtoLLNuNu if args.year == 2016: mc = [ yt_TWW ] # mc = [ dim6top_TTW ] # mc = [ TWZ ] # mc = [ yt_TWZ ] # mc = [ yt_TWZ_filter, yt_TWZ ] # mc = [ yt_TWZ_filter ] for sample in mc: sample.style = styles.fillStyle(sample.color) for sample in mc + signals: sample.scale = lumi_scale #sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F'] sample.weight = lambda event, sample: 1 # if args.year == 2016: # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # else: # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # tr = triggerSelector(args.year) sample.setSelectionString(getLeptonSelection(mode)) #sample.setSelectionString([getFilterCut(isData=False, year=args.year), getLeptonSelection(mode), tr.getSelection("MC")]) if not args.noData: stack = Stack(mc, data_sample) else: stack = Stack(mc) stack.extend( [ [s] for s in signals ] ) if args.small: for sample in stack.samples: sample.reduceFiles( to = 1 ) # Use some defaults Plot.setDefaults(stack = stack, weight = staticmethod(weight_), selectionString = cutInterpreter.cutString(args.selection), addOverFlowBin='both') plots = [] plots.append(Plot( texX = 'N_{l, loose}', texY = 'Number of Events', name = 'nLepLoose', attribute = lambda event, sample: event.nlep, #binning=[7,0.5,7.5], binning=[8,-0.5,7.5], )) plotting.fill(plots, read_variables = read_variables, sequence = sequence) if args.noData: yields[mode]["data"] = 0 # noData dataMCScale = 0 for plot in plots: for i, l in enumerate(plot.histos): for j, h in enumerate(l): test = h.Integral(4,6) print "INTEGRAL", test # print h.GetXaxis().SetBinLabel(3,"test") drawPlots(plots, mode, dataMCScale) allPlots[mode] = plots exit() # Add the different channels into SF and all for mode in ["comb1","comb2","comb3","all"]: yields[mode] = {} for y in yields[allModes[0]]: try: yields[mode][y] = sum(yields[c][y] for c in ['eeee','mueee','mumuee', 'mumumue', 'mumumumu']) except: yields[mode][y] = 0 dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan') for plot in allPlots['mumumumu']: if mode=="comb1": tmp = allPlots['mumumue'] elif mode=="comb2": tmp = allPlots['mumuee'] elif mode=="comb3": tmp = allPlots['mueee'] else: tmp = allPlots['eeee'] for plot2 in (p for p in tmp if p.name == plot.name): for i, j in 
enumerate(list(itertools.chain.from_iterable(plot.histos))): for k, l in enumerate(list(itertools.chain.from_iterable(plot2.histos))): if i==k: j.Add(l) if mode == "all": drawPlots(allPlots['mumumumu'], mode, dataMCScale) logger.info( "Done with prefix %s and selectionString %s", args.selection, cutInterpreter.cutString(args.selection) )
    leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))]
    lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ]
    event.nLepLoose = len(lepLoose)
identifier_body
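The getLooseLeptonMult body above reduces to a per-lepton predicate: pt above 10 GeV and either a medium-ID muon or any electron. A self-contained sketch of the same selection (the is_loose helper and the example lepton dictionaries are illustrative; the keys mirror the getObjDict call in the script):

def is_loose(lep):
    # pt > 10 GeV and (medium-ID muon or electron)
    return lep['pt'] > 10 and ((lep['mediumMuonId'] and abs(lep['pdgId']) == 13) or abs(lep['pdgId']) == 11)

leptons = [
    {'pt': 25.0, 'pdgId':  13, 'mediumMuonId': 1},  # medium muon -> kept
    {'pt':  8.0, 'pdgId': -13, 'mediumMuonId': 1},  # too soft    -> rejected
    {'pt': 15.0, 'pdgId': -11, 'mediumMuonId': 0},  # electron    -> kept
]
print(len([l for l in leptons if is_loose(l)]))     # -> 2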
analysisPlots_TWZ_nLep.py
#!/usr/bin/env python ''' Analysis script for standard plots ''' # # Standard imports and batch mode # import ROOT, os ROOT.gROOT.SetBatch(True) import itertools from math import sqrt, cos, sin, pi, acos, cosh from RootTools.core.standard import * from TopEFT.Tools.user import plot_directory from TopEFT.Tools.helpers import deltaPhi, getObjDict, getVarValue, deltaR, deltaR2 from TopEFT.Tools.objectSelection import getFilterCut from TopEFT.Tools.cutInterpreter import cutInterpreter from TopEFT.Tools.triggerSelector import triggerSelector from TopEFT.samples.color import color # for mt2ll from TopEFT.Tools.mt2Calculator import mt2Calculator mt2Calc = mt2Calculator() # # Arguments # import argparse argParser = argparse.ArgumentParser(description = "Argument parser") argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging") argParser.add_argument('--noData', action='store_true', default=True, help='also plot data?') argParser.add_argument('--small', action='store_true', help='Run only on a small subset of the data?', ) argParser.add_argument('--plot_directory', action='store', default='Histo_Integral')#'analysisPlots_4l') argParser.add_argument('--selection', action='store', default='None')#'quadlepTWZ-onZ1-noZ2') # quadlep-lepSelQuad-njet2p-btag0p-onZ1-offZ2 or quadlep-lepSelQuad-njet2p-btag1p-onZ1-offZ2 for signal regions argParser.add_argument('--normalize', action='store_true', default=False, help="Normalize yields" ) argParser.add_argument('--year', action='store', default=2016, type=int, help="Which year?" ) args = argParser.parse_args() # PU reweighting on the fly from TopEFT.Tools.puProfileCache import puProfile from TopEFT.Tools.puReweighting import getReweightingFunction from TopEFT.samples.helpers import fromHeppySample # # Logger # import TopEFT.Tools.logger as logger import RootTools.core.logger as logger_rt logger = logger.get_logger( args.logLevel, logFile = None) logger_rt = logger_rt.get_logger(args.logLevel, logFile = None) if args.small: args.plot_directory += "_small" if args.noData: args.plot_directory += "_noData" if args.normalize: args.plot_directory += "_normalize" # # Make samples, will be searched for in the postProcessing directory # if args.year == 2016: data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import * data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/" postProcessing_directory = "TopEFT_PP_2016_mva_v21/trilep/" from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import * data_directory = "/afs/hephy.at/data/rschoefbeck01/cmgTuples/" signals = [] # # Text on the plots # def drawObjects( plotData, dataMCScale, lumi_scale ): tex = ROOT.TLatex() tex.SetNDC() tex.SetTextSize(0.04) tex.SetTextAlign(11) # align right lines = [ (0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation'), (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) ) if plotData else (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale) ] return [tex.DrawLatex(*l) for l in lines] scaling = { i+1:0 for i in range(len(signals)) } def drawPlots(plots, mode, dataMCScale): for log in [False, True]: plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection) for plot in plots: if not max(l[0].GetMaximum() 
for l in plot.histos): continue # Empty plot if not args.noData: if mode == "all": plot.histos[1][0].legendText = "Data" if mode == "SF": plot.histos[1][0].legendText = "Data (SF)" extensions_ = ["pdf", "png", "root"]# if mode == 'all' else ['png'] plotting.draw(plot, plot_directory = plot_directory_, extensions = extensions_, ratio = {'yRange':(0.1,1.9)} if not args.noData else None, logX = False, logY = log, sorting = True, yRange = (0.03, "auto") if log else (0.001, "auto"), scaling = scaling if args.normalize else {}, legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2], drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ), copyIndexPHP = True, ) def getLeptonSelection( mode ): # if mode=="nLep": return "nMuons_tight_4l+nElectrons_tight_4l>=1" if mode =="nLep": return "(1)" # # Read variables and sequences # read_variables = ["weight/F", "jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I", "lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I", "met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I", "Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I", "Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F", "Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F", "totalLeptonCharge/I", ] sequence = [] def getLooseLeptonMult( event, sample ): leptons = [getObjDict(event, 'lep_', ['eta','pt','phi','charge', 'pdgId', 'sourceId','mediumMuonId'], i) for i in range(len(event.lep_pt))] lepLoose = [ l for l in leptons if l['pt'] > 10 and ((l['mediumMuonId'] and abs(l['pdgId'])==13) or abs(l['pdgId'])==11) ] event.nLepLoose = len(lepLoose) sequence.append( getLooseLeptonMult ) # # Loop over channels # yields = {} allPlots = {} allModes = ['nLep'] for index, mode in enumerate(allModes): yields[mode] = {} logger.info("Working on mode %s", mode) if not args.noData: data_sample = Run2016 if args.year == 2016 else Run2017 data_sample.texName = "data" data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)]) data_sample.name = "data" data_sample.read_variables = ["evt/I","run/I"] data_sample.style = styles.errorStyle(ROOT.kBlack) lumi_scale = data_sample.lumi/1000 if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0 weight_ = lambda event, sample: event.weight lumi_scale = 300 TTZ_mc = TTZtoLLNuNu if args.year == 2016: mc = [ yt_TWW ] # mc = [ dim6top_TTW ] # mc = [ TWZ ] # mc = [ yt_TWZ ] # mc = [ yt_TWZ_filter, yt_TWZ ] # mc = [ yt_TWZ_filter ] for sample in mc: sample.style = styles.fillStyle(sample.color) for sample in mc + signals: sample.scale = lumi_scale #sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F'] sample.weight = lambda event, sample: 1 # if args.year == 2016: # sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt) # else:
sample.setSelectionString(getLeptonSelection(mode)) #sample.setSelectionString([getFilterCut(isData=False, year=args.year), getLeptonSelection(mode), tr.getSelection("MC")]) if not args.noData: stack = Stack(mc, data_sample) else: stack = Stack(mc) stack.extend( [ [s] for s in signals ] ) if args.small: for sample in stack.samples: sample.reduceFiles( to = 1 ) # Use some defaults Plot.setDefaults(stack = stack, weight = staticmethod(weight_), selectionString = cutInterpreter.cutString(args.selection), addOverFlowBin='both') plots = [] plots.append(Plot( texX = 'N_{l, loose}', texY = 'Number of Events', name = 'nLepLoose', attribute = lambda event, sample: event.nlep, #binning=[7,0.5,7.5], binning=[8,-0.5,7.5], )) plotting.fill(plots, read_variables = read_variables, sequence = sequence) if args.noData: yields[mode]["data"] = 0 # noData dataMCScale = 0 for plot in plots: for i, l in enumerate(plot.histos): for j, h in enumerate(l): test = h.Integral(4,6) print "INTEGRAL", test # print h.GetXaxis().SetBinLabel(3,"test") drawPlots(plots, mode, dataMCScale) allPlots[mode] = plots exit() # Add the different channels into SF and all for mode in ["comb1","comb2","comb3","all"]: yields[mode] = {} for y in yields[allModes[0]]: try: yields[mode][y] = sum(yields[c][y] for c in ['eeee','mueee','mumuee', 'mumumue', 'mumumumu']) except: yields[mode][y] = 0 dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan') for plot in allPlots['mumumumu']: if mode=="comb1": tmp = allPlots['mumumue'] elif mode=="comb2": tmp = allPlots['mumuee'] elif mode=="comb3": tmp = allPlots['mueee'] else: tmp = allPlots['eeee'] for plot2 in (p for p in tmp if p.name == plot.name): for i, j in enumerate(list(itertools.chain.from_iterable(plot.histos))): for k, l in enumerate(list(itertools.chain.from_iterable(plot2.histos))): if i==k: j.Add(l) if mode == "all": drawPlots(allPlots['mumumumu'], mode, dataMCScale) logger.info( "Done with prefix %s and selectionString %s", args.selection, cutInterpreter.cutString(args.selection) )
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# tr = triggerSelector(args.year)
random_line_split
main.go
package main import ( "encoding/binary" "flag" "fmt" "io" "io/ioutil" "os" ) const ( OP_IMM = 0x13 OP_LUI = 0x37 OP_AUIPC = 0x17 OP = 0x33 OP_JAL = 0x6f OP_JALR = 0x67 OP_BRANCH = 0x63 OP_LOAD = 0x03 OP_STORE = 0x23 OP_SYSTEM = 0x73 ) // OP_IMM const ( FUNCT_ADDI = 0 FUNCT_SLLI = 1 FUNCT_SLTI = 2 FUNCT_SLTUI = 3 FUNCT_XORI = 4 FUNCT_SRXI = 5 FUNCT_ORI = 6 FUNCT_ANDI = 7 ) // OP const ( FUNCT_ADD_SUB = 0 FUNCT_SLL = 1 FUNCT_SLT = 2 FUNCT_SLTU = 3 FUNCT_XOR = 4 FUNCT_SRX = 5 FUNCT_OR = 6 FUNCT_AND = 7 ) // BRANCH const ( FUNCT_BEQ = 0 FUNCT_BNE = 1 FUNCT_BLT = 4 FUNCT_BGE = 5 FUNCT_BLTU = 6 FUNCT_BGEU = 7 ) // SYSTEM const ( FUNCT_CSRRW = 1 FUNCT_CSRRS = 2 FUNCT_CSRRC = 3 FUNCT_PRIV = 0 ) // SYSTEM PRIV const ( PRIV_EBREAK = 0x1 PRIV_ECALL = 0x00 ) // CSRs const ( CsrM = 0x300 CsrS = 0x100 CsrU = 0x000 CsrStatus = 0x000 CsrIe = 0x004 CsrTvec = 0x005 CsrScratch = 0x040 CsrEpc = 0x041 CsrCause = 0x042 CsrTval = 0x043 CsrCycle = 0xc00 CsrCycleh = 0xc80 CsrTime = 0xc01 CsrTimeh = 0xc81 CsrInstret = 0xc02 CsrInstreth = 0xc82 CsrHalt = 0x3ff ) // Exceptions const ( ExceptionIllegalInstruction = 2 ExceptionBreakpoint = 3 ExceptionEcallM = 11 ) const ( RegZero = 0 RegRA = 1 RegSP = 2 RegGP = 3 RegTP = 4 RegT0 = 5 RegT1 = 6 RegT2 = 7 RegFP = 8 RegS0 = 8 RegS1 = 9 RegA0 = 10 RegA1 = 11 RegA2 = 12 RegA3 = 13 RegA4 = 14 RegA5 = 15 RegA6 = 16 RegA7 = 17 RegS2 = 18 RegS3 = 19 RegS4 = 20 RegS5 = 21 RegS6 = 22 RegS7 = 23 RegS8 = 24 RegS9 = 25 RegS10 = 26 RegS11 = 27 RegST3 = 28 RegST4 = 29 RegST5 = 30 RegST6 = 31 ) var _RegNames []string = []string{ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "FP", "S0", "S1", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9", "S10", "S11", "ST3", "ST4", "ST5", "ST6", } type Memory interface { LoadWord(addr uint32) uint32 LoadHalfWord(addr uint32) uint16 LoadByte(addr uint32) uint8 StoreWord(addr uint32, v uint32) StoreHalfWord(addr uint32, v uint16) StoreByte(addr uint32, v uint8) } type Ram struct { memory []uint8 } func NewRam(size uint32) *Ram { return NewRamFromBuffer(make([]uint8, size)) } func NewRamFromBuffer(buf []uint8) *Ram { return &Ram{buf} } func (mem *Ram) LoadWord(addr uint32) uint32 { return binary.LittleEndian.Uint32(mem.memory[addr : addr+4]) } func (mem *Ram) LoadHalfWord(addr uint32) uint16 { return binary.LittleEndian.Uint16(mem.memory[addr : addr+2]) } func (mem *Ram) LoadByte(addr uint32) uint8 { return mem.memory[addr] } func (mem *Ram) StoreWord(addr uint32, v uint32) { binary.LittleEndian.PutUint32(mem.memory[addr:addr+4], v) } func (mem *Ram) StoreHalfWord(addr uint32, v uint16) { binary.LittleEndian.PutUint16(mem.memory[addr:addr+2], v) } func (mem *Ram) StoreByte(addr uint32, v uint8) { mem.memory[addr] = v } type Range struct { Addr, Size uint32 Memory Memory } type Mmu struct { ranges []Range } func NewMmu() *Mmu { return &Mmu{} } func (mmu *Mmu) AddRange(addr, size uint32, mem Memory) { //@todo: sanity checks mmu.ranges = append(mmu.ranges, Range{addr, size, mem}) } func (mmu *Mmu) findRange(addr uint32) (*Range, uint32) { for _, r := range mmu.ranges { if addr >= r.Addr && addr < (r.Addr+r.Size) { return &r, addr - r.Addr } } return nil, 0 } func (mmu *Mmu) LoadWord(addr uint32) uint32 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadWord(addr) } return 0 } func (mmu *Mmu) LoadHalfWord(addr uint32) uint16 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadHalfWord(addr) } return 0 } func (mmu *Mmu) LoadByte(addr uint32) uint8 { r, addr := 
mmu.findRange(addr) if r != nil { return r.Memory.LoadByte(addr) } return 0 } func (mmu *Mmu) StoreWord(addr uint32, v uint32) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreWord(addr, v) } } func (mmu *Mmu) StoreHalfWord(addr uint32, v uint16) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreHalfWord(addr, v) } } func (mmu *Mmu) StoreByte(addr uint32, v uint8) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreByte(addr, v) } } type MmioSerial struct { w io.Writer r io.Reader } func (s *MmioSerial) LoadWord(addr uint32) uint32 { return uint32(s.LoadByte(addr)) } func (s *MmioSerial) LoadHalfWord(addr uint32) uint16 { return uint16(s.LoadByte(addr)) } func (s *MmioSerial) LoadByte(addr uint32) uint8 { if s.r == nil { return 0 } var b [1]uint8 s.r.Read(b[:]) return b[0] } func (s *MmioSerial) StoreWord(addr uint32, v uint32) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreHalfWord(addr uint32, v uint16) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreByte(addr uint32, v uint8) { if s.w == nil { return } b := []uint8{v} s.w.Write(b) } type Cpu struct { initialAddr uint32 registers [32]uint32 pc uint32 memory Memory halt bool cycles uint64 ticks uint64 instret uint64 mtvec uint32 mcause uint32 mepc uint32 mtval uint32 mscratch uint32 haltValue uint32 } func New(memory Memory, initialAddr uint32) *Cpu { cpu := &Cpu{} cpu.initialAddr = initialAddr cpu.memory = memory cpu.Reset() return cpu } func (cpu *Cpu) LoadWord(addr uint32) uint32 { return cpu.memory.LoadWord(addr) } func (cpu *Cpu) LoadHalfWord(addr uint32) uint16 { return cpu.memory.LoadHalfWord(addr) } func (cpu *Cpu) LoadByte(addr uint32) uint8 { return cpu.memory.LoadByte(addr) } func (cpu *Cpu) StoreWord(addr uint32, v uint32) { cpu.memory.StoreWord(addr, v) } func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) { cpu.memory.StoreHalfWord(addr, v) } func (cpu *Cpu) StoreByte(addr uint32, v uint8) { cpu.memory.StoreByte(addr, v) } func (cpu *Cpu) IsValidCsr(csr uint32) bool { if csr == CsrHalt { return true } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle, CsrCycleh, CsrTime, CsrTimeh, CsrInstret, CsrInstreth: return true } if priv != CsrM { return false } switch csr { case CsrTvec, CsrTval, CsrCause, CsrEpc, CsrScratch: return true } return false } func (cpu *Cpu) GetCsr(csr uint32) uint32 { if csr == CsrHalt { return cpu.haltValue } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle: return uint32(cpu.cycles) case CsrCycleh: return uint32(cpu.cycles >> 32) case CsrTime: return uint32(cpu.ticks) case CsrTimeh: return uint32(cpu.ticks >> 32) case CsrInstret: return uint32(cpu.instret) case CsrInstreth: return uint32(cpu.instret >> 32) } // we only have machine mode csrs for everything else if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } switch csr { case CsrTvec: return cpu.mtvec & 0xfffffffc case CsrTval: return cpu.mtval case CsrCause: return cpu.mcause case CsrEpc: return cpu.mepc & 0xfffffffe case CsrScratch: return cpu.mscratch default: fmt.Printf("invalid csr: 0x%03x\n", csr) } return 0 } func (cpu *Cpu) SetCsr(csr uint32, v uint32) { if csr == CsrHalt { cpu.halt = true cpu.haltValue = v return } priv := csr & ^uint32(0xcff) // save priv if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } csr &= 0xcff // ignore priv switch csr { case CsrTvec: cpu.mtvec = v & 0xfffffffc case CsrCause: cpu.mcause = v case CsrTval: cpu.mtval = v case CsrScratch: 
cpu.mscratch = v case CsrEpc: cpu.mepc = v & 0xfffffffe } // do nothing } func (cpu *Cpu) Reset() { for i, _ := range cpu.registers { cpu.registers[i] = 0 } cpu.pc = cpu.initialAddr cpu.halt = false cpu.cycles = 0 cpu.ticks = 0 cpu.instret = 0 cpu.mtvec = 0 cpu.mcause = 0 cpu.mepc = 0 cpu.mtval = 0 cpu.mscratch = 0 } func (cpu *Cpu) GetReg(idx uint8) uint32 { if idx == 0 { return 0 } else if idx > 0 && idx < 32 { return cpu.registers[idx] } panic(fmt.Sprint("invalid register ", idx)) } func (cpu *Cpu) SetReg(idx uint8, v uint32) { if idx == 0 { // do nothing } else if idx > 0 && idx < 32 { cpu.registers[idx] = v } else { panic(fmt.Sprint("invalid register ", idx)) } } func (cpu *Cpu) Execute() { for !cpu.halt { cpu.Step() } } func (cpu *Cpu) Halt() { cpu.halt = true } func (cpu *Cpu) Debug() string { res := "" for i := uint8(1); i < 32; i++ { res += fmt.Sprintf("%s: 0x%08x ", _RegNames[i], cpu.GetReg(i)) } res += fmt.Sprintf("pc: 0x%08x ", cpu.pc) return res } func (cpu *Cpu) fetch() uint32 { inst := cpu.LoadWord(cpu.pc) cpu.pc += 4 return inst } func (cpu *Cpu) decode(inst uint32) { // we are only allowed to trap in the decode phase // this makes it so the trap function is only visible here trap := func(cause uint32, value uint32) { cpu.SetCsr(CsrTval|CsrM, value) cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4) cpu.pc = cpu.GetCsr(CsrTvec | CsrM) cpu.SetCsr(CsrCause|CsrM, cause) cpu.cycles += 1 cpu.ticks += 1 } opcode := inst & 0x7f decode: switch opcode { case OP_IMM: _, rd, funct, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) var res uint32 switch funct { case FUNCT_ADDI: res = rs1v + imm case FUNCT_SLTI: if int32(rs1v) < int32(imm) { res = 1 } else { res = 0 } case FUNCT_SLTUI: if rs1v < imm { res = 1 } else { res = 0 } case FUNCT_XORI: res = rs1v ^ imm case FUNCT_ANDI: res = rs1v & imm case FUNCT_ORI: res = rs1v | imm case FUNCT_SLLI: res = rs1v << imm case FUNCT_SRXI: if imm&0x400 != 0 { // golang does arithmatic shift for ints res = uint32(int32(rs1v) >> (imm & 0x1f)) } else { res = rs1v >> (imm & 0x1f) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_LUI: _, rd, imm := utype(inst) cpu.SetReg(rd, imm<<12) case OP_AUIPC: _, rd, imm := utype(inst) cpu.SetReg(rd, cpu.pc+(imm<<12)-4) case OP: _, rd, funct3, rs1, rs2, funct7 := rtype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var res uint32 switch funct3 { case FUNCT_ADD_SUB: if funct7&0x20 == 0 { res = rs1v + rs2v } else { res = rs1v - rs2v } case FUNCT_SLT: if int32(rs1v) < int32(rs2v) { res = 1 } else { res = 0 } case FUNCT_SLTU: if rs1v < rs2v { res = 1 } else { res = 0 } case FUNCT_AND: res = rs1v & rs2v case FUNCT_OR: res = rs1v | rs2v case FUNCT_XOR: res = rs1v ^ rs2v case FUNCT_SLL: res = rs1v << (rs2v & 0x1f) case FUNCT_SRX: if funct7&0x20 == 0 { res = rs1v >> (rs2v & 0x1f) } else { res = uint32(int32(rs1v) >> (rs2v & 0x1f)) } default:
case OP_JAL: _, rd, imm := jtype(inst) cpu.SetReg(rd, cpu.pc) cpu.pc += imm - 4 case OP_JALR: _, rd, _, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, cpu.pc) cpu.pc = (rs1v + imm) & 0xfffffffe case OP_BRANCH: _, funct3, rs1, rs2, imm := btype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var shouldBranch bool switch funct3 { case FUNCT_BEQ: shouldBranch = rs1v == rs2v case FUNCT_BNE: shouldBranch = rs1v != rs2v case FUNCT_BLT: shouldBranch = int32(rs1v) < int32(rs2v) case FUNCT_BLTU: shouldBranch = rs1v < rs2v case FUNCT_BGE: shouldBranch = int32(rs1v) >= int32(rs2v) case FUNCT_BGEU: shouldBranch = rs1v >= rs2v default: trap(ExceptionIllegalInstruction, inst) break decode } if shouldBranch { cpu.pc += imm - 4 } case OP_LOAD: _, dest, width, base, imm := itype(inst) addr := cpu.GetReg(base) + imm var res uint32 switch width { case 0: // LB res = signExtend(uint32(cpu.LoadByte(addr)), 8) case 1: // LH res = signExtend(uint32(cpu.LoadHalfWord(addr)), 16) case 2: // LW res = cpu.LoadWord(addr) case 4: // LBU res = uint32(cpu.LoadByte(addr)) case 5: // LHU res = uint32(cpu.LoadHalfWord(addr)) default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(dest, res) case OP_STORE: _, funct, rs1, rs2, imm := stype(inst) addr := cpu.GetReg(rs1) + imm rs2v := cpu.GetReg(rs2) switch funct { case 0: // SB cpu.StoreByte(addr, uint8(rs2v)) case 1: // SH cpu.StoreHalfWord(addr, uint16(rs2v)) case 2: // LW cpu.StoreWord(addr, rs2v) default: trap(ExceptionIllegalInstruction, inst) break decode } case OP_SYSTEM: _, rd, funct3, rs1, imm := itype(inst) switch funct3 { case FUNCT_CSRRW, FUNCT_CSRRS, FUNCT_CSRRC: csr := imm & 0xfff if !cpu.IsValidCsr(csr) { trap(ExceptionIllegalInstruction, inst) break decode } // check if we are trying to write to an RO csr isReadOnly := csr > 0xc00 if isReadOnly && rs1 != 0 { trap(ExceptionIllegalInstruction, inst) break decode } csrv := cpu.GetCsr(csr) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, csrv) if rs1 != 0 { switch funct3 { case FUNCT_CSRRW: csrv = rs1v case FUNCT_CSRRS: csrv = csrv & rs1v case FUNCT_CSRRC: csrv = csrv & (^rs1v) } cpu.SetCsr(csr, csrv) } case FUNCT_PRIV: switch imm { case PRIV_ECALL: trap(ExceptionEcallM, cpu.pc-4) break decode case PRIV_EBREAK: trap(ExceptionBreakpoint, cpu.pc-4) break decode default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) } cpu.cycles += 1 cpu.ticks += 1 cpu.instret += 1 } func (cpu *Cpu) Step() { if cpu.halt { return } inst := cpu.fetch() cpu.decode(inst) } func bitrange(inst uint32, fromBit, len uint) uint32 { return (inst >> fromBit) & ((1 << len) - 1) } func signExtend(n uint32, bit uint) uint32 { if n&(1<<bit) != 0 { n |= ^((1 << bit) - 1) } return n } func btype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 8, 4) << 1 imm |= bitrange(inst, 25, 6) << 5 imm |= bitrange(inst, 7, 1) << 11 imm |= bitrange(inst, 31, 1) << 12 imm = signExtend(imm, 12) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } func itype(inst uint32) (opcode, rd, funct, rs1 uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), signExtend(bitrange(inst, 20, 12), 11) } func rtype(inst uint32) (opcode, rd, funct3, rs1, rs2, funct7 uint8) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 
5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), uint8(bitrange(inst, 25, 7)) } func utype(inst uint32) (opcode, rd uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), bitrange(inst, 12, 20) } func jtype(inst uint32) (opcode, rd uint8, imm uint32) { imm |= bitrange(inst, 21, 10) << 1 imm |= bitrange(inst, 20, 1) << 11 imm |= bitrange(inst, 12, 8) << 12 imm |= bitrange(inst, 31, 1) << 20 imm = signExtend(imm, 20) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), imm } func stype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 7, 5) imm |= bitrange(inst, 25, 7) << 5 imm = signExtend(imm, 11) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } type Board struct { cpu *Cpu } func (b *Board) Cpu() *Cpu { return b.cpu } func (b *Board) Execute() { b.cpu.Execute() } func (b *Board) Step() { b.cpu.Step() } const BoardInitialAddr = 0x100 func NewBoard(prog []uint8, in io.Reader, out io.Writer) *Board { mmu := NewMmu() mmu.AddRange(BoardInitialAddr, uint32(len(prog)), NewRamFromBuffer(prog)) mmu.AddRange(0xfffffffe, 1, &MmioSerial{r: in, w: out}) cpu := New(mmu, BoardInitialAddr) cpu.Reset() return &Board{ cpu: cpu, } } func main() { flag.Parse() if flag.NArg() != 1 { flag.Usage() flag.PrintDefaults() os.Exit(1) } prog, err := ioutil.ReadFile(flag.Arg(0)) if err != nil { panic(err) } board := NewBoard(prog, os.Stdin, os.Stdout) board.Execute() os.Exit(int(board.Cpu().GetCsr(CsrHalt))) }
			trap(ExceptionIllegalInstruction, inst)
			break decode
		}
		cpu.SetReg(rd, res)
random_line_split
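The bitrange and signExtend helpers in main.go above do the standard RISC-V field extraction; the itype split is opcode [6:0], rd [11:7], funct3 [14:12], rs1 [19:15] and a sign-extended 12-bit immediate [31:20]. A minimal sketch of the same I-type decode, rewritten in Python for illustration (0x00500093 is the standard encoding of addi x1, x0, 5):

def bitrange(inst, from_bit, length):
    return (inst >> from_bit) & ((1 << length) - 1)

def sign_extend(n, bit):
    # treat bit `bit` as the sign bit of n
    return n - (1 << (bit + 1)) if n & (1 << bit) else n

inst   = 0x00500093                               # addi x1, x0, 5
opcode = bitrange(inst, 0, 7)                     # 0x13 -> OP_IMM
rd     = bitrange(inst, 7, 5)                     # 1
funct3 = bitrange(inst, 12, 3)                    # 0 -> FUNCT_ADDI
rs1    = bitrange(inst, 15, 5)                    # 0
imm    = sign_extend(bitrange(inst, 20, 12), 11)  # 5
print("opcode=0x%02x rd=%d funct3=%d rs1=%d imm=%d" % (opcode, rd, funct3, rs1, imm))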
main.go
package main import ( "encoding/binary" "flag" "fmt" "io" "io/ioutil" "os" ) const ( OP_IMM = 0x13 OP_LUI = 0x37 OP_AUIPC = 0x17 OP = 0x33 OP_JAL = 0x6f OP_JALR = 0x67 OP_BRANCH = 0x63 OP_LOAD = 0x03 OP_STORE = 0x23 OP_SYSTEM = 0x73 ) // OP_IMM const ( FUNCT_ADDI = 0 FUNCT_SLLI = 1 FUNCT_SLTI = 2 FUNCT_SLTUI = 3 FUNCT_XORI = 4 FUNCT_SRXI = 5 FUNCT_ORI = 6 FUNCT_ANDI = 7 ) // OP const ( FUNCT_ADD_SUB = 0 FUNCT_SLL = 1 FUNCT_SLT = 2 FUNCT_SLTU = 3 FUNCT_XOR = 4 FUNCT_SRX = 5 FUNCT_OR = 6 FUNCT_AND = 7 ) // BRANCH const ( FUNCT_BEQ = 0 FUNCT_BNE = 1 FUNCT_BLT = 4 FUNCT_BGE = 5 FUNCT_BLTU = 6 FUNCT_BGEU = 7 ) // SYSTEM const ( FUNCT_CSRRW = 1 FUNCT_CSRRS = 2 FUNCT_CSRRC = 3 FUNCT_PRIV = 0 ) // SYSTEM PRIV const ( PRIV_EBREAK = 0x1 PRIV_ECALL = 0x00 ) // CSRs const ( CsrM = 0x300 CsrS = 0x100 CsrU = 0x000 CsrStatus = 0x000 CsrIe = 0x004 CsrTvec = 0x005 CsrScratch = 0x040 CsrEpc = 0x041 CsrCause = 0x042 CsrTval = 0x043 CsrCycle = 0xc00 CsrCycleh = 0xc80 CsrTime = 0xc01 CsrTimeh = 0xc81 CsrInstret = 0xc02 CsrInstreth = 0xc82 CsrHalt = 0x3ff ) // Exceptions const ( ExceptionIllegalInstruction = 2 ExceptionBreakpoint = 3 ExceptionEcallM = 11 ) const ( RegZero = 0 RegRA = 1 RegSP = 2 RegGP = 3 RegTP = 4 RegT0 = 5 RegT1 = 6 RegT2 = 7 RegFP = 8 RegS0 = 8 RegS1 = 9 RegA0 = 10 RegA1 = 11 RegA2 = 12 RegA3 = 13 RegA4 = 14 RegA5 = 15 RegA6 = 16 RegA7 = 17 RegS2 = 18 RegS3 = 19 RegS4 = 20 RegS5 = 21 RegS6 = 22 RegS7 = 23 RegS8 = 24 RegS9 = 25 RegS10 = 26 RegS11 = 27 RegST3 = 28 RegST4 = 29 RegST5 = 30 RegST6 = 31 ) var _RegNames []string = []string{ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "FP", "S0", "S1", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9", "S10", "S11", "ST3", "ST4", "ST5", "ST6", } type Memory interface { LoadWord(addr uint32) uint32 LoadHalfWord(addr uint32) uint16 LoadByte(addr uint32) uint8 StoreWord(addr uint32, v uint32) StoreHalfWord(addr uint32, v uint16) StoreByte(addr uint32, v uint8) } type Ram struct { memory []uint8 } func NewRam(size uint32) *Ram { return NewRamFromBuffer(make([]uint8, size)) } func NewRamFromBuffer(buf []uint8) *Ram { return &Ram{buf} } func (mem *Ram) LoadWord(addr uint32) uint32 { return binary.LittleEndian.Uint32(mem.memory[addr : addr+4]) } func (mem *Ram) LoadHalfWord(addr uint32) uint16 { return binary.LittleEndian.Uint16(mem.memory[addr : addr+2]) } func (mem *Ram) LoadByte(addr uint32) uint8 { return mem.memory[addr] } func (mem *Ram) StoreWord(addr uint32, v uint32) { binary.LittleEndian.PutUint32(mem.memory[addr:addr+4], v) } func (mem *Ram) StoreHalfWord(addr uint32, v uint16) { binary.LittleEndian.PutUint16(mem.memory[addr:addr+2], v) } func (mem *Ram) StoreByte(addr uint32, v uint8) { mem.memory[addr] = v } type Range struct { Addr, Size uint32 Memory Memory } type Mmu struct { ranges []Range } func NewMmu() *Mmu { return &Mmu{} } func (mmu *Mmu) AddRange(addr, size uint32, mem Memory) { //@todo: sanity checks mmu.ranges = append(mmu.ranges, Range{addr, size, mem}) } func (mmu *Mmu) findRange(addr uint32) (*Range, uint32) { for _, r := range mmu.ranges { if addr >= r.Addr && addr < (r.Addr+r.Size) { return &r, addr - r.Addr } } return nil, 0 } func (mmu *Mmu) LoadWord(addr uint32) uint32 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadWord(addr) } return 0 } func (mmu *Mmu) LoadHalfWord(addr uint32) uint16 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadHalfWord(addr) } return 0 } func (mmu *Mmu) LoadByte(addr uint32) uint8 { r, addr := 
mmu.findRange(addr) if r != nil { return r.Memory.LoadByte(addr) } return 0 } func (mmu *Mmu) StoreWord(addr uint32, v uint32) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreWord(addr, v) } } func (mmu *Mmu) StoreHalfWord(addr uint32, v uint16) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreHalfWord(addr, v) } } func (mmu *Mmu) StoreByte(addr uint32, v uint8) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreByte(addr, v) } } type MmioSerial struct { w io.Writer r io.Reader } func (s *MmioSerial) LoadWord(addr uint32) uint32 { return uint32(s.LoadByte(addr)) } func (s *MmioSerial) LoadHalfWord(addr uint32) uint16 { return uint16(s.LoadByte(addr)) } func (s *MmioSerial) LoadByte(addr uint32) uint8 { if s.r == nil { return 0 } var b [1]uint8 s.r.Read(b[:]) return b[0] } func (s *MmioSerial) StoreWord(addr uint32, v uint32) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreHalfWord(addr uint32, v uint16) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreByte(addr uint32, v uint8) { if s.w == nil { return } b := []uint8{v} s.w.Write(b) } type Cpu struct { initialAddr uint32 registers [32]uint32 pc uint32 memory Memory halt bool cycles uint64 ticks uint64 instret uint64 mtvec uint32 mcause uint32 mepc uint32 mtval uint32 mscratch uint32 haltValue uint32 } func New(memory Memory, initialAddr uint32) *Cpu { cpu := &Cpu{} cpu.initialAddr = initialAddr cpu.memory = memory cpu.Reset() return cpu } func (cpu *Cpu) LoadWord(addr uint32) uint32 { return cpu.memory.LoadWord(addr) } func (cpu *Cpu) LoadHalfWord(addr uint32) uint16 { return cpu.memory.LoadHalfWord(addr) } func (cpu *Cpu) LoadByte(addr uint32) uint8 { return cpu.memory.LoadByte(addr) } func (cpu *Cpu) StoreWord(addr uint32, v uint32) { cpu.memory.StoreWord(addr, v) } func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) { cpu.memory.StoreHalfWord(addr, v) } func (cpu *Cpu) StoreByte(addr uint32, v uint8) { cpu.memory.StoreByte(addr, v) } func (cpu *Cpu) IsValidCsr(csr uint32) bool { if csr == CsrHalt { return true } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle, CsrCycleh, CsrTime, CsrTimeh, CsrInstret, CsrInstreth: return true } if priv != CsrM { return false } switch csr { case CsrTvec, CsrTval, CsrCause, CsrEpc, CsrScratch: return true } return false } func (cpu *Cpu) GetCsr(csr uint32) uint32 { if csr == CsrHalt { return cpu.haltValue } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle: return uint32(cpu.cycles) case CsrCycleh: return uint32(cpu.cycles >> 32) case CsrTime: return uint32(cpu.ticks) case CsrTimeh: return uint32(cpu.ticks >> 32) case CsrInstret: return uint32(cpu.instret) case CsrInstreth: return uint32(cpu.instret >> 32) } // we only have machine mode csrs for everything else if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } switch csr { case CsrTvec: return cpu.mtvec & 0xfffffffc case CsrTval: return cpu.mtval case CsrCause: return cpu.mcause case CsrEpc: return cpu.mepc & 0xfffffffe case CsrScratch: return cpu.mscratch default: fmt.Printf("invalid csr: 0x%03x\n", csr) } return 0 } func (cpu *Cpu) SetCsr(csr uint32, v uint32) { if csr == CsrHalt { cpu.halt = true cpu.haltValue = v return } priv := csr & ^uint32(0xcff) // save priv if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } csr &= 0xcff // ignore priv switch csr { case CsrTvec: cpu.mtvec = v & 0xfffffffc case CsrCause: cpu.mcause = v case CsrTval: cpu.mtval = v case CsrScratch: 
cpu.mscratch = v case CsrEpc: cpu.mepc = v & 0xfffffffe } // do nothing } func (cpu *Cpu) Reset() { for i, _ := range cpu.registers { cpu.registers[i] = 0 } cpu.pc = cpu.initialAddr cpu.halt = false cpu.cycles = 0 cpu.ticks = 0 cpu.instret = 0 cpu.mtvec = 0 cpu.mcause = 0 cpu.mepc = 0 cpu.mtval = 0 cpu.mscratch = 0 } func (cpu *Cpu) GetReg(idx uint8) uint32 { if idx == 0 { return 0 } else if idx > 0 && idx < 32 { return cpu.registers[idx] } panic(fmt.Sprint("invalid register ", idx)) } func (cpu *Cpu) SetReg(idx uint8, v uint32) { if idx == 0 { // do nothing } else if idx > 0 && idx < 32 { cpu.registers[idx] = v } else { panic(fmt.Sprint("invalid register ", idx)) } } func (cpu *Cpu) Execute() { for !cpu.halt { cpu.Step() } } func (cpu *Cpu) Halt() { cpu.halt = true } func (cpu *Cpu) Debug() string { res := "" for i := uint8(1); i < 32; i++ { res += fmt.Sprintf("%s: 0x%08x ", _RegNames[i], cpu.GetReg(i)) } res += fmt.Sprintf("pc: 0x%08x ", cpu.pc) return res } func (cpu *Cpu) fetch() uint32 { inst := cpu.LoadWord(cpu.pc) cpu.pc += 4 return inst } func (cpu *Cpu) decode(inst uint32) { // we are only allowed to trap in the decode phase // this makes it so the trap function is only visible here trap := func(cause uint32, value uint32) { cpu.SetCsr(CsrTval|CsrM, value) cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4) cpu.pc = cpu.GetCsr(CsrTvec | CsrM) cpu.SetCsr(CsrCause|CsrM, cause) cpu.cycles += 1 cpu.ticks += 1 } opcode := inst & 0x7f decode: switch opcode { case OP_IMM: _, rd, funct, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) var res uint32 switch funct { case FUNCT_ADDI: res = rs1v + imm case FUNCT_SLTI: if int32(rs1v) < int32(imm) { res = 1 } else { res = 0 } case FUNCT_SLTUI: if rs1v < imm { res = 1 } else { res = 0 } case FUNCT_XORI: res = rs1v ^ imm case FUNCT_ANDI: res = rs1v & imm case FUNCT_ORI: res = rs1v | imm case FUNCT_SLLI: res = rs1v << imm case FUNCT_SRXI: if imm&0x400 != 0 { // golang does arithmatic shift for ints res = uint32(int32(rs1v) >> (imm & 0x1f)) } else { res = rs1v >> (imm & 0x1f) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_LUI: _, rd, imm := utype(inst) cpu.SetReg(rd, imm<<12) case OP_AUIPC: _, rd, imm := utype(inst) cpu.SetReg(rd, cpu.pc+(imm<<12)-4) case OP: _, rd, funct3, rs1, rs2, funct7 := rtype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var res uint32 switch funct3 { case FUNCT_ADD_SUB: if funct7&0x20 == 0
else { res = rs1v - rs2v } case FUNCT_SLT: if int32(rs1v) < int32(rs2v) { res = 1 } else { res = 0 } case FUNCT_SLTU: if rs1v < rs2v { res = 1 } else { res = 0 } case FUNCT_AND: res = rs1v & rs2v case FUNCT_OR: res = rs1v | rs2v case FUNCT_XOR: res = rs1v ^ rs2v case FUNCT_SLL: res = rs1v << (rs2v & 0x1f) case FUNCT_SRX: if funct7&0x20 == 0 { res = rs1v >> (rs2v & 0x1f) } else { res = uint32(int32(rs1v) >> (rs2v & 0x1f)) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_JAL: _, rd, imm := jtype(inst) cpu.SetReg(rd, cpu.pc) cpu.pc += imm - 4 case OP_JALR: _, rd, _, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, cpu.pc) cpu.pc = (rs1v + imm) & 0xfffffffe case OP_BRANCH: _, funct3, rs1, rs2, imm := btype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var shouldBranch bool switch funct3 { case FUNCT_BEQ: shouldBranch = rs1v == rs2v case FUNCT_BNE: shouldBranch = rs1v != rs2v case FUNCT_BLT: shouldBranch = int32(rs1v) < int32(rs2v) case FUNCT_BLTU: shouldBranch = rs1v < rs2v case FUNCT_BGE: shouldBranch = int32(rs1v) >= int32(rs2v) case FUNCT_BGEU: shouldBranch = rs1v >= rs2v default: trap(ExceptionIllegalInstruction, inst) break decode } if shouldBranch { cpu.pc += imm - 4 } case OP_LOAD: _, dest, width, base, imm := itype(inst) addr := cpu.GetReg(base) + imm var res uint32 switch width { case 0: // LB res = signExtend(uint32(cpu.LoadByte(addr)), 8) case 1: // LH res = signExtend(uint32(cpu.LoadHalfWord(addr)), 16) case 2: // LW res = cpu.LoadWord(addr) case 4: // LBU res = uint32(cpu.LoadByte(addr)) case 5: // LHU res = uint32(cpu.LoadHalfWord(addr)) default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(dest, res) case OP_STORE: _, funct, rs1, rs2, imm := stype(inst) addr := cpu.GetReg(rs1) + imm rs2v := cpu.GetReg(rs2) switch funct { case 0: // SB cpu.StoreByte(addr, uint8(rs2v)) case 1: // SH cpu.StoreHalfWord(addr, uint16(rs2v)) case 2: // LW cpu.StoreWord(addr, rs2v) default: trap(ExceptionIllegalInstruction, inst) break decode } case OP_SYSTEM: _, rd, funct3, rs1, imm := itype(inst) switch funct3 { case FUNCT_CSRRW, FUNCT_CSRRS, FUNCT_CSRRC: csr := imm & 0xfff if !cpu.IsValidCsr(csr) { trap(ExceptionIllegalInstruction, inst) break decode } // check if we are trying to write to an RO csr isReadOnly := csr > 0xc00 if isReadOnly && rs1 != 0 { trap(ExceptionIllegalInstruction, inst) break decode } csrv := cpu.GetCsr(csr) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, csrv) if rs1 != 0 { switch funct3 { case FUNCT_CSRRW: csrv = rs1v case FUNCT_CSRRS: csrv = csrv & rs1v case FUNCT_CSRRC: csrv = csrv & (^rs1v) } cpu.SetCsr(csr, csrv) } case FUNCT_PRIV: switch imm { case PRIV_ECALL: trap(ExceptionEcallM, cpu.pc-4) break decode case PRIV_EBREAK: trap(ExceptionBreakpoint, cpu.pc-4) break decode default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) } cpu.cycles += 1 cpu.ticks += 1 cpu.instret += 1 } func (cpu *Cpu) Step() { if cpu.halt { return } inst := cpu.fetch() cpu.decode(inst) } func bitrange(inst uint32, fromBit, len uint) uint32 { return (inst >> fromBit) & ((1 << len) - 1) } func signExtend(n uint32, bit uint) uint32 { if n&(1<<bit) != 0 { n |= ^((1 << bit) - 1) } return n } func btype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 8, 4) << 1 imm |= bitrange(inst, 25, 6) << 5 imm |= bitrange(inst, 7, 1) << 11 imm |= bitrange(inst, 31, 1) << 12 imm = 
signExtend(imm, 12) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } func itype(inst uint32) (opcode, rd, funct, rs1 uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), signExtend(bitrange(inst, 20, 12), 11) } func rtype(inst uint32) (opcode, rd, funct3, rs1, rs2, funct7 uint8) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), uint8(bitrange(inst, 25, 7)) } func utype(inst uint32) (opcode, rd uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), bitrange(inst, 12, 20) } func jtype(inst uint32) (opcode, rd uint8, imm uint32) { imm |= bitrange(inst, 21, 10) << 1 imm |= bitrange(inst, 20, 1) << 11 imm |= bitrange(inst, 12, 8) << 12 imm |= bitrange(inst, 31, 1) << 20 imm = signExtend(imm, 20) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), imm } func stype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 7, 5) imm |= bitrange(inst, 25, 7) << 5 imm = signExtend(imm, 11) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } type Board struct { cpu *Cpu } func (b *Board) Cpu() *Cpu { return b.cpu } func (b *Board) Execute() { b.cpu.Execute() } func (b *Board) Step() { b.cpu.Step() } const BoardInitialAddr = 0x100 func NewBoard(prog []uint8, in io.Reader, out io.Writer) *Board { mmu := NewMmu() mmu.AddRange(BoardInitialAddr, uint32(len(prog)), NewRamFromBuffer(prog)) mmu.AddRange(0xfffffffe, 1, &MmioSerial{r: in, w: out}) cpu := New(mmu, BoardInitialAddr) cpu.Reset() return &Board{ cpu: cpu, } } func main() { flag.Parse() if flag.NArg() != 1 { flag.Usage() flag.PrintDefaults() os.Exit(1) } prog, err := ioutil.ReadFile(flag.Arg(0)) if err != nil { panic(err) } board := NewBoard(prog, os.Stdin, os.Stdout) board.Execute() os.Exit(int(board.Cpu().GetCsr(CsrHalt))) }
{ res = rs1v + rs2v }
conditional_block
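The completed block above is the ADD arm of the FUNCT_ADD_SUB case: bit 0x20 of funct7 is what separates ADD from SUB (the same bit later separates SRL from SRA in FUNCT_SRX). Below is a minimal stand-alone sketch of that selection, taken from the decoder logic above; the helper name addSub is illustrative and not part of main.go.

package main

import "fmt"

// addSub mirrors the FUNCT_ADD_SUB arm of the decoder: funct7 bit 0x20
// selects subtraction, otherwise the result is the 32-bit wrapping sum.
func addSub(rs1v, rs2v uint32, funct7 uint8) uint32 {
	if funct7&0x20 == 0 {
		return rs1v + rs2v
	}
	return rs1v - rs2v
}

func main() {
	fmt.Printf("0x%08x\n", addSub(5, 3, 0x00)) // ADD -> 0x00000008
	fmt.Printf("0x%08x\n", addSub(5, 3, 0x20)) // SUB -> 0x00000002
}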
main.go
package main import ( "encoding/binary" "flag" "fmt" "io" "io/ioutil" "os" ) const ( OP_IMM = 0x13 OP_LUI = 0x37 OP_AUIPC = 0x17 OP = 0x33 OP_JAL = 0x6f OP_JALR = 0x67 OP_BRANCH = 0x63 OP_LOAD = 0x03 OP_STORE = 0x23 OP_SYSTEM = 0x73 ) // OP_IMM const ( FUNCT_ADDI = 0 FUNCT_SLLI = 1 FUNCT_SLTI = 2 FUNCT_SLTUI = 3 FUNCT_XORI = 4 FUNCT_SRXI = 5 FUNCT_ORI = 6 FUNCT_ANDI = 7 ) // OP const ( FUNCT_ADD_SUB = 0 FUNCT_SLL = 1 FUNCT_SLT = 2 FUNCT_SLTU = 3 FUNCT_XOR = 4 FUNCT_SRX = 5 FUNCT_OR = 6 FUNCT_AND = 7 ) // BRANCH const ( FUNCT_BEQ = 0 FUNCT_BNE = 1 FUNCT_BLT = 4 FUNCT_BGE = 5 FUNCT_BLTU = 6 FUNCT_BGEU = 7 ) // SYSTEM const ( FUNCT_CSRRW = 1 FUNCT_CSRRS = 2 FUNCT_CSRRC = 3 FUNCT_PRIV = 0 ) // SYSTEM PRIV const ( PRIV_EBREAK = 0x1 PRIV_ECALL = 0x00 ) // CSRs const ( CsrM = 0x300 CsrS = 0x100 CsrU = 0x000 CsrStatus = 0x000 CsrIe = 0x004 CsrTvec = 0x005 CsrScratch = 0x040 CsrEpc = 0x041 CsrCause = 0x042 CsrTval = 0x043 CsrCycle = 0xc00 CsrCycleh = 0xc80 CsrTime = 0xc01 CsrTimeh = 0xc81 CsrInstret = 0xc02 CsrInstreth = 0xc82 CsrHalt = 0x3ff ) // Exceptions const ( ExceptionIllegalInstruction = 2 ExceptionBreakpoint = 3 ExceptionEcallM = 11 ) const ( RegZero = 0 RegRA = 1 RegSP = 2 RegGP = 3 RegTP = 4 RegT0 = 5 RegT1 = 6 RegT2 = 7 RegFP = 8 RegS0 = 8 RegS1 = 9 RegA0 = 10 RegA1 = 11 RegA2 = 12 RegA3 = 13 RegA4 = 14 RegA5 = 15 RegA6 = 16 RegA7 = 17 RegS2 = 18 RegS3 = 19 RegS4 = 20 RegS5 = 21 RegS6 = 22 RegS7 = 23 RegS8 = 24 RegS9 = 25 RegS10 = 26 RegS11 = 27 RegST3 = 28 RegST4 = 29 RegST5 = 30 RegST6 = 31 ) var _RegNames []string = []string{ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "FP", "S0", "S1", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9", "S10", "S11", "ST3", "ST4", "ST5", "ST6", } type Memory interface { LoadWord(addr uint32) uint32 LoadHalfWord(addr uint32) uint16 LoadByte(addr uint32) uint8 StoreWord(addr uint32, v uint32) StoreHalfWord(addr uint32, v uint16) StoreByte(addr uint32, v uint8) } type Ram struct { memory []uint8 } func NewRam(size uint32) *Ram { return NewRamFromBuffer(make([]uint8, size)) } func NewRamFromBuffer(buf []uint8) *Ram { return &Ram{buf} } func (mem *Ram) LoadWord(addr uint32) uint32 { return binary.LittleEndian.Uint32(mem.memory[addr : addr+4]) } func (mem *Ram) LoadHalfWord(addr uint32) uint16 { return binary.LittleEndian.Uint16(mem.memory[addr : addr+2]) } func (mem *Ram) LoadByte(addr uint32) uint8 { return mem.memory[addr] } func (mem *Ram) StoreWord(addr uint32, v uint32) { binary.LittleEndian.PutUint32(mem.memory[addr:addr+4], v) } func (mem *Ram) StoreHalfWord(addr uint32, v uint16) { binary.LittleEndian.PutUint16(mem.memory[addr:addr+2], v) } func (mem *Ram) StoreByte(addr uint32, v uint8) { mem.memory[addr] = v } type Range struct { Addr, Size uint32 Memory Memory } type Mmu struct { ranges []Range } func NewMmu() *Mmu { return &Mmu{} } func (mmu *Mmu) AddRange(addr, size uint32, mem Memory) { //@todo: sanity checks mmu.ranges = append(mmu.ranges, Range{addr, size, mem}) } func (mmu *Mmu) findRange(addr uint32) (*Range, uint32) { for _, r := range mmu.ranges { if addr >= r.Addr && addr < (r.Addr+r.Size) { return &r, addr - r.Addr } } return nil, 0 } func (mmu *Mmu) LoadWord(addr uint32) uint32 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadWord(addr) } return 0 } func (mmu *Mmu) LoadHalfWord(addr uint32) uint16 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadHalfWord(addr) } return 0 } func (mmu *Mmu) LoadByte(addr uint32) uint8 { r, addr := 
mmu.findRange(addr) if r != nil { return r.Memory.LoadByte(addr) } return 0 } func (mmu *Mmu) StoreWord(addr uint32, v uint32) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreWord(addr, v) } } func (mmu *Mmu) StoreHalfWord(addr uint32, v uint16) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreHalfWord(addr, v) } } func (mmu *Mmu) StoreByte(addr uint32, v uint8) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreByte(addr, v) } } type MmioSerial struct { w io.Writer r io.Reader } func (s *MmioSerial) LoadWord(addr uint32) uint32 { return uint32(s.LoadByte(addr)) } func (s *MmioSerial) LoadHalfWord(addr uint32) uint16 { return uint16(s.LoadByte(addr)) } func (s *MmioSerial) LoadByte(addr uint32) uint8 { if s.r == nil { return 0 } var b [1]uint8 s.r.Read(b[:]) return b[0] } func (s *MmioSerial) StoreWord(addr uint32, v uint32) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreHalfWord(addr uint32, v uint16) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreByte(addr uint32, v uint8) { if s.w == nil { return } b := []uint8{v} s.w.Write(b) } type Cpu struct { initialAddr uint32 registers [32]uint32 pc uint32 memory Memory halt bool cycles uint64 ticks uint64 instret uint64 mtvec uint32 mcause uint32 mepc uint32 mtval uint32 mscratch uint32 haltValue uint32 } func New(memory Memory, initialAddr uint32) *Cpu { cpu := &Cpu{} cpu.initialAddr = initialAddr cpu.memory = memory cpu.Reset() return cpu } func (cpu *Cpu) LoadWord(addr uint32) uint32 { return cpu.memory.LoadWord(addr) } func (cpu *Cpu) LoadHalfWord(addr uint32) uint16 { return cpu.memory.LoadHalfWord(addr) } func (cpu *Cpu) LoadByte(addr uint32) uint8 { return cpu.memory.LoadByte(addr) } func (cpu *Cpu) StoreWord(addr uint32, v uint32) { cpu.memory.StoreWord(addr, v) } func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) { cpu.memory.StoreHalfWord(addr, v) } func (cpu *Cpu) StoreByte(addr uint32, v uint8) { cpu.memory.StoreByte(addr, v) } func (cpu *Cpu) IsValidCsr(csr uint32) bool { if csr == CsrHalt { return true } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle, CsrCycleh, CsrTime, CsrTimeh, CsrInstret, CsrInstreth: return true } if priv != CsrM { return false } switch csr { case CsrTvec, CsrTval, CsrCause, CsrEpc, CsrScratch: return true } return false } func (cpu *Cpu) GetCsr(csr uint32) uint32 { if csr == CsrHalt { return cpu.haltValue } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle: return uint32(cpu.cycles) case CsrCycleh: return uint32(cpu.cycles >> 32) case CsrTime: return uint32(cpu.ticks) case CsrTimeh: return uint32(cpu.ticks >> 32) case CsrInstret: return uint32(cpu.instret) case CsrInstreth: return uint32(cpu.instret >> 32) } // we only have machine mode csrs for everything else if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } switch csr { case CsrTvec: return cpu.mtvec & 0xfffffffc case CsrTval: return cpu.mtval case CsrCause: return cpu.mcause case CsrEpc: return cpu.mepc & 0xfffffffe case CsrScratch: return cpu.mscratch default: fmt.Printf("invalid csr: 0x%03x\n", csr) } return 0 } func (cpu *Cpu) SetCsr(csr uint32, v uint32) { if csr == CsrHalt { cpu.halt = true cpu.haltValue = v return } priv := csr & ^uint32(0xcff) // save priv if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } csr &= 0xcff // ignore priv switch csr { case CsrTvec: cpu.mtvec = v & 0xfffffffc case CsrCause: cpu.mcause = v case CsrTval: cpu.mtval = v case CsrScratch: 
cpu.mscratch = v case CsrEpc: cpu.mepc = v & 0xfffffffe } // do nothing } func (cpu *Cpu) Reset() { for i, _ := range cpu.registers { cpu.registers[i] = 0 } cpu.pc = cpu.initialAddr cpu.halt = false cpu.cycles = 0 cpu.ticks = 0 cpu.instret = 0 cpu.mtvec = 0 cpu.mcause = 0 cpu.mepc = 0 cpu.mtval = 0 cpu.mscratch = 0 } func (cpu *Cpu) GetReg(idx uint8) uint32 { if idx == 0 { return 0 } else if idx > 0 && idx < 32 { return cpu.registers[idx] } panic(fmt.Sprint("invalid register ", idx)) } func (cpu *Cpu) SetReg(idx uint8, v uint32) { if idx == 0 { // do nothing } else if idx > 0 && idx < 32 { cpu.registers[idx] = v } else { panic(fmt.Sprint("invalid register ", idx)) } } func (cpu *Cpu) Execute() { for !cpu.halt { cpu.Step() } } func (cpu *Cpu) Halt() { cpu.halt = true } func (cpu *Cpu) Debug() string { res := "" for i := uint8(1); i < 32; i++ { res += fmt.Sprintf("%s: 0x%08x ", _RegNames[i], cpu.GetReg(i)) } res += fmt.Sprintf("pc: 0x%08x ", cpu.pc) return res } func (cpu *Cpu) fetch() uint32 { inst := cpu.LoadWord(cpu.pc) cpu.pc += 4 return inst } func (cpu *Cpu) decode(inst uint32) { // we are only allowed to trap in the decode phase // this makes it so the trap function is only visible here trap := func(cause uint32, value uint32) { cpu.SetCsr(CsrTval|CsrM, value) cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4) cpu.pc = cpu.GetCsr(CsrTvec | CsrM) cpu.SetCsr(CsrCause|CsrM, cause) cpu.cycles += 1 cpu.ticks += 1 } opcode := inst & 0x7f decode: switch opcode { case OP_IMM: _, rd, funct, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) var res uint32 switch funct { case FUNCT_ADDI: res = rs1v + imm case FUNCT_SLTI: if int32(rs1v) < int32(imm) { res = 1 } else { res = 0 } case FUNCT_SLTUI: if rs1v < imm { res = 1 } else { res = 0 } case FUNCT_XORI: res = rs1v ^ imm case FUNCT_ANDI: res = rs1v & imm case FUNCT_ORI: res = rs1v | imm case FUNCT_SLLI: res = rs1v << imm case FUNCT_SRXI: if imm&0x400 != 0 { // golang does arithmatic shift for ints res = uint32(int32(rs1v) >> (imm & 0x1f)) } else { res = rs1v >> (imm & 0x1f) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_LUI: _, rd, imm := utype(inst) cpu.SetReg(rd, imm<<12) case OP_AUIPC: _, rd, imm := utype(inst) cpu.SetReg(rd, cpu.pc+(imm<<12)-4) case OP: _, rd, funct3, rs1, rs2, funct7 := rtype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var res uint32 switch funct3 { case FUNCT_ADD_SUB: if funct7&0x20 == 0 { res = rs1v + rs2v } else { res = rs1v - rs2v } case FUNCT_SLT: if int32(rs1v) < int32(rs2v) { res = 1 } else { res = 0 } case FUNCT_SLTU: if rs1v < rs2v { res = 1 } else { res = 0 } case FUNCT_AND: res = rs1v & rs2v case FUNCT_OR: res = rs1v | rs2v case FUNCT_XOR: res = rs1v ^ rs2v case FUNCT_SLL: res = rs1v << (rs2v & 0x1f) case FUNCT_SRX: if funct7&0x20 == 0 { res = rs1v >> (rs2v & 0x1f) } else { res = uint32(int32(rs1v) >> (rs2v & 0x1f)) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_JAL: _, rd, imm := jtype(inst) cpu.SetReg(rd, cpu.pc) cpu.pc += imm - 4 case OP_JALR: _, rd, _, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, cpu.pc) cpu.pc = (rs1v + imm) & 0xfffffffe case OP_BRANCH: _, funct3, rs1, rs2, imm := btype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var shouldBranch bool switch funct3 { case FUNCT_BEQ: shouldBranch = rs1v == rs2v case FUNCT_BNE: shouldBranch = rs1v != rs2v case FUNCT_BLT: shouldBranch = int32(rs1v) < int32(rs2v) case FUNCT_BLTU: shouldBranch = rs1v < rs2v case FUNCT_BGE: shouldBranch = int32(rs1v) >= 
int32(rs2v) case FUNCT_BGEU: shouldBranch = rs1v >= rs2v default: trap(ExceptionIllegalInstruction, inst) break decode } if shouldBranch { cpu.pc += imm - 4 } case OP_LOAD: _, dest, width, base, imm := itype(inst) addr := cpu.GetReg(base) + imm var res uint32 switch width { case 0: // LB res = signExtend(uint32(cpu.LoadByte(addr)), 8) case 1: // LH res = signExtend(uint32(cpu.LoadHalfWord(addr)), 16) case 2: // LW res = cpu.LoadWord(addr) case 4: // LBU res = uint32(cpu.LoadByte(addr)) case 5: // LHU res = uint32(cpu.LoadHalfWord(addr)) default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(dest, res) case OP_STORE: _, funct, rs1, rs2, imm := stype(inst) addr := cpu.GetReg(rs1) + imm rs2v := cpu.GetReg(rs2) switch funct { case 0: // SB cpu.StoreByte(addr, uint8(rs2v)) case 1: // SH cpu.StoreHalfWord(addr, uint16(rs2v)) case 2: // LW cpu.StoreWord(addr, rs2v) default: trap(ExceptionIllegalInstruction, inst) break decode } case OP_SYSTEM: _, rd, funct3, rs1, imm := itype(inst) switch funct3 { case FUNCT_CSRRW, FUNCT_CSRRS, FUNCT_CSRRC: csr := imm & 0xfff if !cpu.IsValidCsr(csr) { trap(ExceptionIllegalInstruction, inst) break decode } // check if we are trying to write to an RO csr isReadOnly := csr > 0xc00 if isReadOnly && rs1 != 0 { trap(ExceptionIllegalInstruction, inst) break decode } csrv := cpu.GetCsr(csr) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, csrv) if rs1 != 0 { switch funct3 { case FUNCT_CSRRW: csrv = rs1v case FUNCT_CSRRS: csrv = csrv & rs1v case FUNCT_CSRRC: csrv = csrv & (^rs1v) } cpu.SetCsr(csr, csrv) } case FUNCT_PRIV: switch imm { case PRIV_ECALL: trap(ExceptionEcallM, cpu.pc-4) break decode case PRIV_EBREAK: trap(ExceptionBreakpoint, cpu.pc-4) break decode default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) } cpu.cycles += 1 cpu.ticks += 1 cpu.instret += 1 } func (cpu *Cpu) Step() { if cpu.halt { return } inst := cpu.fetch() cpu.decode(inst) } func bitrange(inst uint32, fromBit, len uint) uint32 { return (inst >> fromBit) & ((1 << len) - 1) } func signExtend(n uint32, bit uint) uint32 { if n&(1<<bit) != 0 { n |= ^((1 << bit) - 1) } return n } func
(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 8, 4) << 1 imm |= bitrange(inst, 25, 6) << 5 imm |= bitrange(inst, 7, 1) << 11 imm |= bitrange(inst, 31, 1) << 12 imm = signExtend(imm, 12) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } func itype(inst uint32) (opcode, rd, funct, rs1 uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), signExtend(bitrange(inst, 20, 12), 11) } func rtype(inst uint32) (opcode, rd, funct3, rs1, rs2, funct7 uint8) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), uint8(bitrange(inst, 25, 7)) } func utype(inst uint32) (opcode, rd uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), bitrange(inst, 12, 20) } func jtype(inst uint32) (opcode, rd uint8, imm uint32) { imm |= bitrange(inst, 21, 10) << 1 imm |= bitrange(inst, 20, 1) << 11 imm |= bitrange(inst, 12, 8) << 12 imm |= bitrange(inst, 31, 1) << 20 imm = signExtend(imm, 20) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), imm } func stype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 7, 5) imm |= bitrange(inst, 25, 7) << 5 imm = signExtend(imm, 11) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } type Board struct { cpu *Cpu } func (b *Board) Cpu() *Cpu { return b.cpu } func (b *Board) Execute() { b.cpu.Execute() } func (b *Board) Step() { b.cpu.Step() } const BoardInitialAddr = 0x100 func NewBoard(prog []uint8, in io.Reader, out io.Writer) *Board { mmu := NewMmu() mmu.AddRange(BoardInitialAddr, uint32(len(prog)), NewRamFromBuffer(prog)) mmu.AddRange(0xfffffffe, 1, &MmioSerial{r: in, w: out}) cpu := New(mmu, BoardInitialAddr) cpu.Reset() return &Board{ cpu: cpu, } } func main() { flag.Parse() if flag.NArg() != 1 { flag.Usage() flag.PrintDefaults() os.Exit(1) } prog, err := ioutil.ReadFile(flag.Arg(0)) if err != nil { panic(err) } board := NewBoard(prog, os.Stdin, os.Stdout) board.Execute() os.Exit(int(board.Cpu().GetCsr(CsrHalt))) }
btype
identifier_name
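The identifier filled in above ("btype") names the B-type decoder from main.go. Its immediate is scattered across the instruction word (imm[4:1]=inst[11:8], imm[10:5]=inst[30:25], imm[11]=inst[7], imm[12]=inst[31]) and is then sign-extended from bit 12. The sketch below reassembles just that immediate, reusing bitrange and signExtend as defined in the source; the wrapper name btypeImm is illustrative.

package main

import "fmt"

func bitrange(inst uint32, fromBit, length uint) uint32 {
	return (inst >> fromBit) & ((1 << length) - 1)
}

func signExtend(n uint32, bit uint) uint32 {
	if n&(1<<bit) != 0 {
		n |= ^((1 << bit) - 1)
	}
	return n
}

// btypeImm rebuilds the B-type branch offset the same way btype does.
func btypeImm(inst uint32) uint32 {
	var imm uint32
	imm |= bitrange(inst, 8, 4) << 1
	imm |= bitrange(inst, 25, 6) << 5
	imm |= bitrange(inst, 7, 1) << 11
	imm |= bitrange(inst, 31, 1) << 12
	return signExtend(imm, 12)
}

func main() {
	// 0xfe000ee3 encodes "beq x0, x0, -4", so the decoded offset is -4.
	fmt.Println(int32(btypeImm(0xfe000ee3))) // -4
}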
main.go
package main import ( "encoding/binary" "flag" "fmt" "io" "io/ioutil" "os" ) const ( OP_IMM = 0x13 OP_LUI = 0x37 OP_AUIPC = 0x17 OP = 0x33 OP_JAL = 0x6f OP_JALR = 0x67 OP_BRANCH = 0x63 OP_LOAD = 0x03 OP_STORE = 0x23 OP_SYSTEM = 0x73 ) // OP_IMM const ( FUNCT_ADDI = 0 FUNCT_SLLI = 1 FUNCT_SLTI = 2 FUNCT_SLTUI = 3 FUNCT_XORI = 4 FUNCT_SRXI = 5 FUNCT_ORI = 6 FUNCT_ANDI = 7 ) // OP const ( FUNCT_ADD_SUB = 0 FUNCT_SLL = 1 FUNCT_SLT = 2 FUNCT_SLTU = 3 FUNCT_XOR = 4 FUNCT_SRX = 5 FUNCT_OR = 6 FUNCT_AND = 7 ) // BRANCH const ( FUNCT_BEQ = 0 FUNCT_BNE = 1 FUNCT_BLT = 4 FUNCT_BGE = 5 FUNCT_BLTU = 6 FUNCT_BGEU = 7 ) // SYSTEM const ( FUNCT_CSRRW = 1 FUNCT_CSRRS = 2 FUNCT_CSRRC = 3 FUNCT_PRIV = 0 ) // SYSTEM PRIV const ( PRIV_EBREAK = 0x1 PRIV_ECALL = 0x00 ) // CSRs const ( CsrM = 0x300 CsrS = 0x100 CsrU = 0x000 CsrStatus = 0x000 CsrIe = 0x004 CsrTvec = 0x005 CsrScratch = 0x040 CsrEpc = 0x041 CsrCause = 0x042 CsrTval = 0x043 CsrCycle = 0xc00 CsrCycleh = 0xc80 CsrTime = 0xc01 CsrTimeh = 0xc81 CsrInstret = 0xc02 CsrInstreth = 0xc82 CsrHalt = 0x3ff ) // Exceptions const ( ExceptionIllegalInstruction = 2 ExceptionBreakpoint = 3 ExceptionEcallM = 11 ) const ( RegZero = 0 RegRA = 1 RegSP = 2 RegGP = 3 RegTP = 4 RegT0 = 5 RegT1 = 6 RegT2 = 7 RegFP = 8 RegS0 = 8 RegS1 = 9 RegA0 = 10 RegA1 = 11 RegA2 = 12 RegA3 = 13 RegA4 = 14 RegA5 = 15 RegA6 = 16 RegA7 = 17 RegS2 = 18 RegS3 = 19 RegS4 = 20 RegS5 = 21 RegS6 = 22 RegS7 = 23 RegS8 = 24 RegS9 = 25 RegS10 = 26 RegS11 = 27 RegST3 = 28 RegST4 = 29 RegST5 = 30 RegST6 = 31 ) var _RegNames []string = []string{ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "FP", "S0", "S1", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9", "S10", "S11", "ST3", "ST4", "ST5", "ST6", } type Memory interface { LoadWord(addr uint32) uint32 LoadHalfWord(addr uint32) uint16 LoadByte(addr uint32) uint8 StoreWord(addr uint32, v uint32) StoreHalfWord(addr uint32, v uint16) StoreByte(addr uint32, v uint8) } type Ram struct { memory []uint8 } func NewRam(size uint32) *Ram { return NewRamFromBuffer(make([]uint8, size)) } func NewRamFromBuffer(buf []uint8) *Ram { return &Ram{buf} } func (mem *Ram) LoadWord(addr uint32) uint32 { return binary.LittleEndian.Uint32(mem.memory[addr : addr+4]) } func (mem *Ram) LoadHalfWord(addr uint32) uint16 { return binary.LittleEndian.Uint16(mem.memory[addr : addr+2]) } func (mem *Ram) LoadByte(addr uint32) uint8 { return mem.memory[addr] } func (mem *Ram) StoreWord(addr uint32, v uint32) { binary.LittleEndian.PutUint32(mem.memory[addr:addr+4], v) } func (mem *Ram) StoreHalfWord(addr uint32, v uint16) { binary.LittleEndian.PutUint16(mem.memory[addr:addr+2], v) } func (mem *Ram) StoreByte(addr uint32, v uint8) { mem.memory[addr] = v } type Range struct { Addr, Size uint32 Memory Memory } type Mmu struct { ranges []Range } func NewMmu() *Mmu { return &Mmu{} } func (mmu *Mmu) AddRange(addr, size uint32, mem Memory) { //@todo: sanity checks mmu.ranges = append(mmu.ranges, Range{addr, size, mem}) } func (mmu *Mmu) findRange(addr uint32) (*Range, uint32) { for _, r := range mmu.ranges { if addr >= r.Addr && addr < (r.Addr+r.Size) { return &r, addr - r.Addr } } return nil, 0 } func (mmu *Mmu) LoadWord(addr uint32) uint32 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadWord(addr) } return 0 } func (mmu *Mmu) LoadHalfWord(addr uint32) uint16 { r, addr := mmu.findRange(addr) if r != nil { return r.Memory.LoadHalfWord(addr) } return 0 } func (mmu *Mmu) LoadByte(addr uint32) uint8 { r, addr := 
mmu.findRange(addr) if r != nil { return r.Memory.LoadByte(addr) } return 0 } func (mmu *Mmu) StoreWord(addr uint32, v uint32) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreWord(addr, v) } } func (mmu *Mmu) StoreHalfWord(addr uint32, v uint16) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreHalfWord(addr, v) } } func (mmu *Mmu) StoreByte(addr uint32, v uint8) { r, addr := mmu.findRange(addr) if r != nil { r.Memory.StoreByte(addr, v) } } type MmioSerial struct { w io.Writer r io.Reader } func (s *MmioSerial) LoadWord(addr uint32) uint32 { return uint32(s.LoadByte(addr)) } func (s *MmioSerial) LoadHalfWord(addr uint32) uint16 { return uint16(s.LoadByte(addr)) } func (s *MmioSerial) LoadByte(addr uint32) uint8 { if s.r == nil { return 0 } var b [1]uint8 s.r.Read(b[:]) return b[0] } func (s *MmioSerial) StoreWord(addr uint32, v uint32) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreHalfWord(addr uint32, v uint16) { s.StoreByte(addr, uint8(v)) } func (s *MmioSerial) StoreByte(addr uint32, v uint8) { if s.w == nil { return } b := []uint8{v} s.w.Write(b) } type Cpu struct { initialAddr uint32 registers [32]uint32 pc uint32 memory Memory halt bool cycles uint64 ticks uint64 instret uint64 mtvec uint32 mcause uint32 mepc uint32 mtval uint32 mscratch uint32 haltValue uint32 } func New(memory Memory, initialAddr uint32) *Cpu { cpu := &Cpu{} cpu.initialAddr = initialAddr cpu.memory = memory cpu.Reset() return cpu } func (cpu *Cpu) LoadWord(addr uint32) uint32 { return cpu.memory.LoadWord(addr) } func (cpu *Cpu) LoadHalfWord(addr uint32) uint16 { return cpu.memory.LoadHalfWord(addr) } func (cpu *Cpu) LoadByte(addr uint32) uint8 { return cpu.memory.LoadByte(addr) } func (cpu *Cpu) StoreWord(addr uint32, v uint32) { cpu.memory.StoreWord(addr, v) } func (cpu *Cpu) StoreHalfWord(addr uint32, v uint16) { cpu.memory.StoreHalfWord(addr, v) } func (cpu *Cpu) StoreByte(addr uint32, v uint8) { cpu.memory.StoreByte(addr, v) } func (cpu *Cpu) IsValidCsr(csr uint32) bool { if csr == CsrHalt { return true } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle, CsrCycleh, CsrTime, CsrTimeh, CsrInstret, CsrInstreth: return true } if priv != CsrM { return false } switch csr { case CsrTvec, CsrTval, CsrCause, CsrEpc, CsrScratch: return true } return false } func (cpu *Cpu) GetCsr(csr uint32) uint32
func (cpu *Cpu) SetCsr(csr uint32, v uint32) { if csr == CsrHalt { cpu.halt = true cpu.haltValue = v return } priv := csr & ^uint32(0xcff) // save priv if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } csr &= 0xcff // ignore priv switch csr { case CsrTvec: cpu.mtvec = v & 0xfffffffc case CsrCause: cpu.mcause = v case CsrTval: cpu.mtval = v case CsrScratch: cpu.mscratch = v case CsrEpc: cpu.mepc = v & 0xfffffffe } // do nothing } func (cpu *Cpu) Reset() { for i, _ := range cpu.registers { cpu.registers[i] = 0 } cpu.pc = cpu.initialAddr cpu.halt = false cpu.cycles = 0 cpu.ticks = 0 cpu.instret = 0 cpu.mtvec = 0 cpu.mcause = 0 cpu.mepc = 0 cpu.mtval = 0 cpu.mscratch = 0 } func (cpu *Cpu) GetReg(idx uint8) uint32 { if idx == 0 { return 0 } else if idx > 0 && idx < 32 { return cpu.registers[idx] } panic(fmt.Sprint("invalid register ", idx)) } func (cpu *Cpu) SetReg(idx uint8, v uint32) { if idx == 0 { // do nothing } else if idx > 0 && idx < 32 { cpu.registers[idx] = v } else { panic(fmt.Sprint("invalid register ", idx)) } } func (cpu *Cpu) Execute() { for !cpu.halt { cpu.Step() } } func (cpu *Cpu) Halt() { cpu.halt = true } func (cpu *Cpu) Debug() string { res := "" for i := uint8(1); i < 32; i++ { res += fmt.Sprintf("%s: 0x%08x ", _RegNames[i], cpu.GetReg(i)) } res += fmt.Sprintf("pc: 0x%08x ", cpu.pc) return res } func (cpu *Cpu) fetch() uint32 { inst := cpu.LoadWord(cpu.pc) cpu.pc += 4 return inst } func (cpu *Cpu) decode(inst uint32) { // we are only allowed to trap in the decode phase // this makes it so the trap function is only visible here trap := func(cause uint32, value uint32) { cpu.SetCsr(CsrTval|CsrM, value) cpu.SetCsr(CsrEpc|CsrM, cpu.pc-4) cpu.pc = cpu.GetCsr(CsrTvec | CsrM) cpu.SetCsr(CsrCause|CsrM, cause) cpu.cycles += 1 cpu.ticks += 1 } opcode := inst & 0x7f decode: switch opcode { case OP_IMM: _, rd, funct, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) var res uint32 switch funct { case FUNCT_ADDI: res = rs1v + imm case FUNCT_SLTI: if int32(rs1v) < int32(imm) { res = 1 } else { res = 0 } case FUNCT_SLTUI: if rs1v < imm { res = 1 } else { res = 0 } case FUNCT_XORI: res = rs1v ^ imm case FUNCT_ANDI: res = rs1v & imm case FUNCT_ORI: res = rs1v | imm case FUNCT_SLLI: res = rs1v << imm case FUNCT_SRXI: if imm&0x400 != 0 { // golang does arithmatic shift for ints res = uint32(int32(rs1v) >> (imm & 0x1f)) } else { res = rs1v >> (imm & 0x1f) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_LUI: _, rd, imm := utype(inst) cpu.SetReg(rd, imm<<12) case OP_AUIPC: _, rd, imm := utype(inst) cpu.SetReg(rd, cpu.pc+(imm<<12)-4) case OP: _, rd, funct3, rs1, rs2, funct7 := rtype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var res uint32 switch funct3 { case FUNCT_ADD_SUB: if funct7&0x20 == 0 { res = rs1v + rs2v } else { res = rs1v - rs2v } case FUNCT_SLT: if int32(rs1v) < int32(rs2v) { res = 1 } else { res = 0 } case FUNCT_SLTU: if rs1v < rs2v { res = 1 } else { res = 0 } case FUNCT_AND: res = rs1v & rs2v case FUNCT_OR: res = rs1v | rs2v case FUNCT_XOR: res = rs1v ^ rs2v case FUNCT_SLL: res = rs1v << (rs2v & 0x1f) case FUNCT_SRX: if funct7&0x20 == 0 { res = rs1v >> (rs2v & 0x1f) } else { res = uint32(int32(rs1v) >> (rs2v & 0x1f)) } default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(rd, res) case OP_JAL: _, rd, imm := jtype(inst) cpu.SetReg(rd, cpu.pc) cpu.pc += imm - 4 case OP_JALR: _, rd, _, rs1, imm := itype(inst) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, cpu.pc) cpu.pc = (rs1v + imm) & 0xfffffffe 
case OP_BRANCH: _, funct3, rs1, rs2, imm := btype(inst) rs1v := cpu.GetReg(rs1) rs2v := cpu.GetReg(rs2) var shouldBranch bool switch funct3 { case FUNCT_BEQ: shouldBranch = rs1v == rs2v case FUNCT_BNE: shouldBranch = rs1v != rs2v case FUNCT_BLT: shouldBranch = int32(rs1v) < int32(rs2v) case FUNCT_BLTU: shouldBranch = rs1v < rs2v case FUNCT_BGE: shouldBranch = int32(rs1v) >= int32(rs2v) case FUNCT_BGEU: shouldBranch = rs1v >= rs2v default: trap(ExceptionIllegalInstruction, inst) break decode } if shouldBranch { cpu.pc += imm - 4 } case OP_LOAD: _, dest, width, base, imm := itype(inst) addr := cpu.GetReg(base) + imm var res uint32 switch width { case 0: // LB res = signExtend(uint32(cpu.LoadByte(addr)), 8) case 1: // LH res = signExtend(uint32(cpu.LoadHalfWord(addr)), 16) case 2: // LW res = cpu.LoadWord(addr) case 4: // LBU res = uint32(cpu.LoadByte(addr)) case 5: // LHU res = uint32(cpu.LoadHalfWord(addr)) default: trap(ExceptionIllegalInstruction, inst) break decode } cpu.SetReg(dest, res) case OP_STORE: _, funct, rs1, rs2, imm := stype(inst) addr := cpu.GetReg(rs1) + imm rs2v := cpu.GetReg(rs2) switch funct { case 0: // SB cpu.StoreByte(addr, uint8(rs2v)) case 1: // SH cpu.StoreHalfWord(addr, uint16(rs2v)) case 2: // LW cpu.StoreWord(addr, rs2v) default: trap(ExceptionIllegalInstruction, inst) break decode } case OP_SYSTEM: _, rd, funct3, rs1, imm := itype(inst) switch funct3 { case FUNCT_CSRRW, FUNCT_CSRRS, FUNCT_CSRRC: csr := imm & 0xfff if !cpu.IsValidCsr(csr) { trap(ExceptionIllegalInstruction, inst) break decode } // check if we are trying to write to an RO csr isReadOnly := csr > 0xc00 if isReadOnly && rs1 != 0 { trap(ExceptionIllegalInstruction, inst) break decode } csrv := cpu.GetCsr(csr) rs1v := cpu.GetReg(rs1) cpu.SetReg(rd, csrv) if rs1 != 0 { switch funct3 { case FUNCT_CSRRW: csrv = rs1v case FUNCT_CSRRS: csrv = csrv & rs1v case FUNCT_CSRRC: csrv = csrv & (^rs1v) } cpu.SetCsr(csr, csrv) } case FUNCT_PRIV: switch imm { case PRIV_ECALL: trap(ExceptionEcallM, cpu.pc-4) break decode case PRIV_EBREAK: trap(ExceptionBreakpoint, cpu.pc-4) break decode default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) break decode } default: trap(ExceptionIllegalInstruction, inst) } cpu.cycles += 1 cpu.ticks += 1 cpu.instret += 1 } func (cpu *Cpu) Step() { if cpu.halt { return } inst := cpu.fetch() cpu.decode(inst) } func bitrange(inst uint32, fromBit, len uint) uint32 { return (inst >> fromBit) & ((1 << len) - 1) } func signExtend(n uint32, bit uint) uint32 { if n&(1<<bit) != 0 { n |= ^((1 << bit) - 1) } return n } func btype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 8, 4) << 1 imm |= bitrange(inst, 25, 6) << 5 imm |= bitrange(inst, 7, 1) << 11 imm |= bitrange(inst, 31, 1) << 12 imm = signExtend(imm, 12) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } func itype(inst uint32) (opcode, rd, funct, rs1 uint8, imm uint32) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), signExtend(bitrange(inst, 20, 12), 11) } func rtype(inst uint32) (opcode, rd, funct3, rs1, rs2, funct7 uint8) { return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), uint8(bitrange(inst, 25, 7)) } func utype(inst uint32) (opcode, rd uint8, imm uint32) { return 
uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), bitrange(inst, 12, 20) } func jtype(inst uint32) (opcode, rd uint8, imm uint32) { imm |= bitrange(inst, 21, 10) << 1 imm |= bitrange(inst, 20, 1) << 11 imm |= bitrange(inst, 12, 8) << 12 imm |= bitrange(inst, 31, 1) << 20 imm = signExtend(imm, 20) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 7, 5)), imm } func stype(inst uint32) (opcode, funct3, rs1, rs2 uint8, imm uint32) { imm |= bitrange(inst, 7, 5) imm |= bitrange(inst, 25, 7) << 5 imm = signExtend(imm, 11) return uint8(bitrange(inst, 0, 7)), uint8(bitrange(inst, 12, 3)), uint8(bitrange(inst, 15, 5)), uint8(bitrange(inst, 20, 5)), imm } type Board struct { cpu *Cpu } func (b *Board) Cpu() *Cpu { return b.cpu } func (b *Board) Execute() { b.cpu.Execute() } func (b *Board) Step() { b.cpu.Step() } const BoardInitialAddr = 0x100 func NewBoard(prog []uint8, in io.Reader, out io.Writer) *Board { mmu := NewMmu() mmu.AddRange(BoardInitialAddr, uint32(len(prog)), NewRamFromBuffer(prog)) mmu.AddRange(0xfffffffe, 1, &MmioSerial{r: in, w: out}) cpu := New(mmu, BoardInitialAddr) cpu.Reset() return &Board{ cpu: cpu, } } func main() { flag.Parse() if flag.NArg() != 1 { flag.Usage() flag.PrintDefaults() os.Exit(1) } prog, err := ioutil.ReadFile(flag.Arg(0)) if err != nil { panic(err) } board := NewBoard(prog, os.Stdin, os.Stdout) board.Execute() os.Exit(int(board.Cpu().GetCsr(CsrHalt))) }
{ if csr == CsrHalt { return cpu.haltValue } priv := csr & ^uint32(0xcff) // save priv csr &= 0xcff // ignore priv switch csr { case CsrCycle: return uint32(cpu.cycles) case CsrCycleh: return uint32(cpu.cycles >> 32) case CsrTime: return uint32(cpu.ticks) case CsrTimeh: return uint32(cpu.ticks >> 32) case CsrInstret: return uint32(cpu.instret) case CsrInstreth: return uint32(cpu.instret >> 32) } // we only have machine mode csrs for everything else if priv != CsrM { panic(fmt.Sprintf("invalid csr: 0x%03x\n", csr)) } switch csr { case CsrTvec: return cpu.mtvec & 0xfffffffc case CsrTval: return cpu.mtval case CsrCause: return cpu.mcause case CsrEpc: return cpu.mepc & 0xfffffffe case CsrScratch: return cpu.mscratch default: fmt.Printf("invalid csr: 0x%03x\n", csr) } return 0 }
identifier_body
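The body recovered above is Cpu.GetCsr. Like SetCsr and IsValidCsr, it splits the CSR number before dispatching: csr & ^uint32(0xcff) keeps the privilege bits (0x300, i.e. CsrM, for the machine-mode registers) and csr & 0xcff keeps the register offset, so CsrTvec|CsrM (0x305) ends up reading mtvec. A minimal sketch of that split follows, assuming the same constants as main.go; splitCsr itself is an illustrative helper, not part of the emulator.

package main

import "fmt"

const (
	CsrM    = 0x300
	CsrTvec = 0x005
)

// splitCsr applies the same masking GetCsr/SetCsr/IsValidCsr use before
// dispatching: the high bits carry the privilege level, the rest the register.
func splitCsr(csr uint32) (priv, reg uint32) {
	priv = csr & ^uint32(0xcff)
	reg = csr & 0xcff
	return priv, reg
}

func main() {
	priv, reg := splitCsr(CsrTvec | CsrM)
	fmt.Printf("priv=0x%03x reg=0x%03x\n", priv, reg) // priv=0x300 reg=0x005
}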
storer.go
// Copyright 2023 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storer import ( "context" "errors" "fmt" "io" "io/fs" "math/big" "os" "path" "path/filepath" "sync" "sync/atomic" "time" "github.com/ethersphere/bee/pkg/log" m "github.com/ethersphere/bee/pkg/metrics" "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/pusher" "github.com/ethersphere/bee/pkg/retrieval" "github.com/ethersphere/bee/pkg/sharky" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/leveldbstore" "github.com/ethersphere/bee/pkg/storage/migration" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" "github.com/ethersphere/bee/pkg/storer/internal/events" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" "github.com/ethersphere/bee/pkg/storer/internal/reserve" "github.com/ethersphere/bee/pkg/storer/internal/upload" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/afero" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/opt" "resenje.org/multex" ) // PutterSession provides a session around the storage.Putter. The session on // successful completion commits all the operations or in case of error, rolls back // the state. type PutterSession interface { storage.Putter // Done is used to close the session and optionally assign a swarm.Address to // this session. Done(swarm.Address) error // Cleanup is used to cleanup any state related to this session in case of // any error. Cleanup() error } // SessionInfo is a type which exports the storer tag object. This object // stores all the relevant information about a particular session. type SessionInfo = upload.TagItem // UploadStore is a logical component of the storer which deals with the upload // of data to swarm. type UploadStore interface { // Upload provides a PutterSession which is tied to the tagID. Optionally if // users requests to pin the data, a new pinning collection is created. Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) // NewSession can be used to obtain a tag ID to use for a new Upload session. NewSession() (SessionInfo, error) // Session will show the information about the session. Session(tagID uint64) (SessionInfo, error) // DeleteSession will delete the session info associated with the tag id. DeleteSession(tagID uint64) error // ListSessions will list all the Sessions currently being tracked. ListSessions(offset, limit int) ([]SessionInfo, error) // BatchHint will return the batch ID hint for the chunk reference if known. BatchHint(swarm.Address) ([]byte, error) } // PinStore is a logical component of the storer which deals with pinning // functionality. type PinStore interface { // NewCollection can be used to create a new PutterSession which writes a new // pinning collection. The address passed in during the Done of the session is // used as the root referencce. NewCollection(context.Context) (PutterSession, error) // DeletePin deletes all the chunks associated with the collection pointed to // by the swarm.Address passed in. 
DeletePin(context.Context, swarm.Address) error // Pins returns all the root references of pinning collections. Pins() ([]swarm.Address, error) // HasPin is a helper which checks if a collection exists with the root // reference passed in. HasPin(swarm.Address) (bool, error) } // PinIterator is a helper interface which can be used to iterate over all the // chunks in a pinning collection. type PinIterator interface { IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error } // CacheStore is a logical component of the storer that deals with cache // content. type CacheStore interface { // Lookup method provides a storage.Getter wrapped around the underlying // ChunkStore which will update cache related indexes if required on successful // lookups. Lookup() storage.Getter // Cache method provides a storage.Putter which will add the chunks to cache. // This will add the chunk to underlying store as well as new indexes which // will keep track of the chunk in the cache. Cache() storage.Putter } // NetStore is a logical component of the storer that deals with network. It will // push/retrieve chunks from the network. type NetStore interface { // DirectUpload provides a session which can be used to push chunks directly // to the network. DirectUpload() PutterSession // Download provides a getter which can be used to download data. If the data // is found locally, its returned immediately, otherwise it is retrieved from // the network. Download(pin bool) storage.Getter // PusherFeed is the feed for direct push chunks. This can be used by the // pusher component to push out the chunks. PusherFeed() <-chan *pusher.Op } var _ Reserve = (*DB)(nil) // Reserve is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type Reserve interface { ReserveStore EvictBatch(ctx context.Context, batchID []byte) error ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error) ReserveSize() int } // ReserveIterator is a helper interface which can be used to iterate over all // the chunks in the reserve. type ReserveIterator interface { ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error } // ReserveStore is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type ReserveStore interface { ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) ReserveHas(addr swarm.Address, batchID []byte) (bool, error) ReservePutter() storage.Putter SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) ReserveLastBinIDs() ([]uint64, error) RadiusChecker } // RadiusChecker provides the radius related functionality. type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 } // LocalStore is a read-only ChunkStore. It can be used to check if chunk is known // locally, but it cannot tell what is the context of the chunk (whether it is // pinned, uploaded, etc.). type LocalStore interface { ChunkStore() storage.ReadOnlyChunkStore } // Debugger is a helper interface which can be used to debug the storer. 
type Debugger interface { DebugInfo(context.Context) (Info, error) } type memFS struct { afero.Fs } func (m *memFS) Open(path string) (fs.File, error) { return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) } type dirFS struct { basedir string } func (d *dirFS) Open(path string) (fs.File, error) { return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) } var sharkyNoOfShards = 32 var ErrDBQuit = errors.New("db quit") type closerFn func() error func (c closerFn) Close() error { return c() } func closer(closers ...io.Closer) io.Closer { return closerFn(func() error { var err error for _, closer := range closers { err = errors.Join(err, closer.Close()) } return err }) } func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) { store, err := leveldbstore.New("", nil) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err) } sharky, err := sharky.New( &memFS{Fs: afero.NewMemMapFs()}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil } // loggerName is the tree path name of the logger for this package. const loggerName = "storer" // Default options for levelDB. const ( defaultOpenFilesLimit = uint64(256) defaultBlockCacheCapacity = uint64(32 * 1024 * 1024) defaultWriteBufferSize = uint64(32 * 1024 * 1024) defaultDisableSeeksCompaction = false defaultCacheCapacity = uint64(1_000_000) defaultBgCacheWorkers = 16 indexPath = "indexstore" sharkyPath = "sharky" ) func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) { ldbBasePath := path.Join(basePath, indexPath) if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) { err := os.MkdirAll(ldbBasePath, 0777) if err != nil { return nil, err } } store, err := leveldbstore.New(path.Join(basePath, "indexstore"), &opt.Options{ OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit), BlockCacheCapacity: int(opts.LdbBlockCacheCapacity), WriteBuffer: int(opts.LdbWriteBufferSize), DisableSeeksCompaction: opts.LdbDisableSeeksCompaction, CompactionL0Trigger: 8, Filter: filter.NewBloomFilter(64), }) if err != nil { return nil, fmt.Errorf("failed creating levelDB index store: %w", err) } return store, nil } func initDiskRepository( ctx context.Context, basePath string, locker storage.ChunkLocker, opts *Options, ) (storage.Repository, io.Closer, error) { store, err := initStore(basePath, opts) if err != nil { return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err) } if opts.LdbStats.Load() != nil { go func() { ldbStats := opts.LdbStats.Load() logger := log.NewLogger(loggerName).Register() ticker := time.NewTicker(15 * time.Second) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: stats := new(leveldb.DBStats) switch err := store.DB().Stats(stats); { case errors.Is(err, leveldb.ErrClosed): return case err != nil: logger.Error(err, "snapshot levelDB stats") default: ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount)) ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds()) ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots)) ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators)) 
ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite)) ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead)) ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize)) ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount)) ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp)) ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp)) ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp)) ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp)) for i := 0; i < len(stats.LevelSizes); i++ { ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds()) } } } } }() } sharkyBasePath := path.Join(basePath, sharkyPath) if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) { err := os.Mkdir(sharkyBasePath, 0777) if err != nil { return nil, nil, err } } recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts) if err != nil { return nil, nil, fmt.Errorf("failed to recover sharky: %w", err) } sharky, err := sharky.New( &dirFS{basedir: sharkyBasePath}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) if err := txStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover index store: %w", err) } txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) if err := txChunkStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err) } return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil } func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() txnRepo, commit, rollback := repo.NewTx(ctx) c, err := cache.New(ctx, txnRepo, capacity) if err != nil { return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback())) } return c, commit() } type noopRadiusSetter struct{} func (noopRadiusSetter) SetStorageRadius(uint8) {} func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) { store, err := initStore(basePath, opts) if err != nil { return err } defer store.Close() sharkyBasePath := path.Join(basePath, sharkyPath) var sharkyRecover *sharky.Recovery // if this is a fresh node then perform an empty epoch migration if _, err := os.Stat(sharkyBasePath); err == nil { sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize) if err != nil { return err } defer sharkyRecover.Close() } logger := opts.Logger.WithName("epochmigration").Register() var rs reservePutter if opts.ReserveCapacity > 0 { rs, err = reserve.New( opts.Address, store, opts.ReserveCapacity, noopRadiusSetter{}, logger, func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error { return nil }, ) if err != nil { return err } } defer func() { if sharkyRecover != nil { retErr = errors.Join(retErr, sharkyRecover.Save()) } }() 
return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger) } const lockKeyNewSession string = "new_session" // Options provides a container to configure different things in the storer. type Options struct { // These are options related to levelDB. Currently, the underlying storage used is levelDB. LdbStats atomic.Pointer[prometheus.HistogramVec] LdbOpenFilesLimit uint64 LdbBlockCacheCapacity uint64 LdbWriteBufferSize uint64 LdbDisableSeeksCompaction bool CacheCapacity uint64 Logger log.Logger Address swarm.Address WarmupDuration time.Duration Batchstore postage.Storer ValidStamp postage.ValidStampFn RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer ReserveCapacity int ReserveWakeUpDuration time.Duration } func defaultOptions() *Options { return &Options{ LdbOpenFilesLimit: defaultOpenFilesLimit, LdbBlockCacheCapacity: defaultBlockCacheCapacity, LdbWriteBufferSize: defaultWriteBufferSize, LdbDisableSeeksCompaction: defaultDisableSeeksCompaction, CacheCapacity: defaultCacheCapacity, Logger: log.Noop, ReserveCapacity: 4_194_304, // 2^22 chunks ReserveWakeUpDuration: time.Minute * 15, } } // DB implements all the component stores described above. type DB struct { logger log.Logger metrics metrics repo storage.Repository lock *multex.Multex cacheObj *cache.Cache retrieval retrieval.Interface pusherFeed chan *pusher.Op quit chan struct{} bgCacheLimiter chan struct{} bgCacheLimiterWg sync.WaitGroup dbCloser io.Closer subscriptionsWG sync.WaitGroup events *events.Subscriber directUploadLimiter chan struct{} reserve *reserve.Reserve inFlight *util.WaitingCounter reserveBinEvents *events.Subscriber baseAddr swarm.Address batchstore postage.Storer validStamp postage.ValidStampFn setSyncerOnce sync.Once syncer Syncer opts workerOpts } type workerOpts struct { warmupDuration time.Duration wakeupDuration time.Duration } // New returns a newly constructed DB object which implements all the above // component stores. 
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { var ( repo storage.Repository err error dbCloser io.Closer ) if opts == nil { opts = defaultOptions() } if opts.Logger == nil { opts.Logger = log.Noop } lock := multex.New() metrics := newMetrics() opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats) locker := func(addr swarm.Address) func() { lock.Lock(addr.ByteString()) return func() { lock.Unlock(addr.ByteString()) } } if dirPath == "" { repo, dbCloser, err = initInmemRepository(locker) if err != nil { return nil, err } } else { // only perform migration if not done already if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil { err = performEpochMigration(ctx, dirPath, opts) if err != nil { return nil, err } } repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts) if err != nil { return nil, err } } sharkyBasePath := "" if dirPath != "" { sharkyBasePath = path.Join(dirPath, sharkyPath) } err = migration.Migrate( repo.IndexStore(), localmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()), ) if err != nil { return nil, err } cacheObj, err := initCache(ctx, opts.CacheCapacity, repo) if err != nil { return nil, err } logger := opts.Logger.WithName(loggerName).Register() db := &DB{ metrics: metrics, logger: logger, baseAddr: opts.Address, repo: repo, lock: lock, cacheObj: cacheObj, retrieval: noopRetrieval{}, pusherFeed: make(chan *pusher.Op), quit: make(chan struct{}), bgCacheLimiter: make(chan struct{}, 16), dbCloser: dbCloser, batchstore: opts.Batchstore, validStamp: opts.ValidStamp, events: events.NewSubscriber(), reserveBinEvents: events.NewSubscriber(), opts: workerOpts{ warmupDuration: opts.WarmupDuration, wakeupDuration: opts.ReserveWakeUpDuration, }, directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes), inFlight: new(util.WaitingCounter), } if db.validStamp == nil { db.validStamp = postage.ValidStamp(db.batchstore) } if opts.ReserveCapacity > 0 { rs, err := reserve.New( opts.Address, repo.IndexStore(), opts.ReserveCapacity, opts.RadiusSetter, logger, func(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error { defer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }() db.lock.Lock(cacheAccessLockKey) defer db.lock.Unlock(cacheAccessLockKey) return cacheObj.MoveFromReserve(ctx, store, addrs...) }, ) if err != nil { return nil, err } db.reserve = rs db.metrics.StorageRadius.Set(float64(rs.Radius())) db.metrics.ReserveSize.Set(float64(rs.Size())) } db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) // Cleanup any dirty state in upload and pinning stores, this could happen // in case of dirty shutdowns err = errors.Join( upload.CleanupDirty(db), pinstore.CleanupDirty(db), ) if err != nil { return nil, err } return db, nil } // Metrics returns set of prometheus collectors. func (db *DB) Metrics() []prometheus.Collector { collectors := m.PrometheusCollectorsFromFields(db.metrics) if v, ok := db.repo.(m.Collector); ok { collectors = append(collectors, v.Metrics()...) } return collectors } func (db *DB) Close() error { close(db.quit) bgReserveWorkersClosed := make(chan struct{}) go func() { defer close(bgReserveWorkersClosed) if c := db.inFlight.Wait(5 * time.Second); c > 0
}() bgCacheWorkersClosed := make(chan struct{}) go func() { defer close(bgCacheWorkersClosed) db.bgCacheLimiterWg.Wait() }() var err error closerDone := make(chan struct{}) go func() { defer close(closerDone) err = db.dbCloser.Close() }() done := make(chan struct{}) go func() { defer close(done) <-closerDone <-bgCacheWorkersClosed <-bgReserveWorkersClosed }() select { case <-done: case <-time.After(3 * time.Second): return errors.New("storer closed with bg goroutines running") } return err } func (db *DB) SetRetrievalService(r retrieval.Interface) { db.retrieval = r } func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) { db.setSyncerOnce.Do(func() { db.syncer = s go db.startReserveWorkers(ctx, db.opts.warmupDuration, db.opts.wakeupDuration, radius) }) } type noopRetrieval struct{} func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) { return nil, storage.ErrNotFound } func (db *DB) ChunkStore() storage.ReadOnlyChunkStore { return db.repo.ChunkStore() } // Execute implements the internal.TxExecutor interface. func (db *DB) Execute(ctx context.Context, do func(internal.Storage) error) error { tx, commit, rollback := db.repo.NewTx(ctx) if err := do(tx); err != nil { return errors.Join(err, rollback()) } return commit() } type putterSession struct { storage.Putter done func(swarm.Address) error cleanup func() error } func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) } func (p *putterSession) Cleanup() error { return p.cleanup() }
{ db.logger.Warning("db shutting down with running goroutines") }
conditional_block
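The block restored above is the warning branch inside DB.Close: the in-flight reserve workers get db.inFlight.Wait(5 * time.Second) to drain, a warning is logged if any remain, and the overall shutdown is capped by a 3-second select before "storer closed with bg goroutines running" is returned. Below is a simplified, stand-alone rendering of that bounded-wait shape; closeWithTimeout is an illustrative helper, not part of the storer API.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// closeWithTimeout runs cleanup in the background and gives it a bounded
// amount of time to finish, mirroring the select/time.After shape in DB.Close.
func closeWithTimeout(cleanup func(), timeout time.Duration) error {
	done := make(chan struct{})
	go func() {
		defer close(done)
		cleanup()
	}()
	select {
	case <-done:
		return nil
	case <-time.After(timeout):
		return errors.New("closed with bg goroutines running")
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { defer wg.Done(); time.Sleep(10 * time.Millisecond) }()
	fmt.Println(closeWithTimeout(wg.Wait, 3*time.Second)) // <nil>
}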
storer.go
// Copyright 2023 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storer import ( "context" "errors" "fmt" "io" "io/fs" "math/big" "os" "path" "path/filepath" "sync" "sync/atomic" "time" "github.com/ethersphere/bee/pkg/log" m "github.com/ethersphere/bee/pkg/metrics" "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/pusher" "github.com/ethersphere/bee/pkg/retrieval" "github.com/ethersphere/bee/pkg/sharky" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/leveldbstore" "github.com/ethersphere/bee/pkg/storage/migration" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" "github.com/ethersphere/bee/pkg/storer/internal/events" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" "github.com/ethersphere/bee/pkg/storer/internal/reserve" "github.com/ethersphere/bee/pkg/storer/internal/upload" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/afero" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/opt" "resenje.org/multex" ) // PutterSession provides a session around the storage.Putter. The session on // successful completion commits all the operations or in case of error, rolls back // the state. type PutterSession interface { storage.Putter // Done is used to close the session and optionally assign a swarm.Address to // this session. Done(swarm.Address) error // Cleanup is used to cleanup any state related to this session in case of // any error. Cleanup() error } // SessionInfo is a type which exports the storer tag object. This object // stores all the relevant information about a particular session. type SessionInfo = upload.TagItem // UploadStore is a logical component of the storer which deals with the upload // of data to swarm. type UploadStore interface { // Upload provides a PutterSession which is tied to the tagID. Optionally if // users requests to pin the data, a new pinning collection is created. Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) // NewSession can be used to obtain a tag ID to use for a new Upload session. NewSession() (SessionInfo, error) // Session will show the information about the session. Session(tagID uint64) (SessionInfo, error) // DeleteSession will delete the session info associated with the tag id. DeleteSession(tagID uint64) error // ListSessions will list all the Sessions currently being tracked. ListSessions(offset, limit int) ([]SessionInfo, error) // BatchHint will return the batch ID hint for the chunk reference if known. BatchHint(swarm.Address) ([]byte, error) } // PinStore is a logical component of the storer which deals with pinning // functionality. type PinStore interface { // NewCollection can be used to create a new PutterSession which writes a new // pinning collection. The address passed in during the Done of the session is // used as the root referencce. NewCollection(context.Context) (PutterSession, error) // DeletePin deletes all the chunks associated with the collection pointed to // by the swarm.Address passed in. 
DeletePin(context.Context, swarm.Address) error // Pins returns all the root references of pinning collections. Pins() ([]swarm.Address, error) // HasPin is a helper which checks if a collection exists with the root // reference passed in. HasPin(swarm.Address) (bool, error) } // PinIterator is a helper interface which can be used to iterate over all the // chunks in a pinning collection. type PinIterator interface { IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error } // CacheStore is a logical component of the storer that deals with cache // content. type CacheStore interface { // Lookup method provides a storage.Getter wrapped around the underlying // ChunkStore which will update cache related indexes if required on successful // lookups. Lookup() storage.Getter // Cache method provides a storage.Putter which will add the chunks to cache. // This will add the chunk to underlying store as well as new indexes which // will keep track of the chunk in the cache.
Cache() storage.Putter } // NetStore is a logical component of the storer that deals with network. It will // push/retrieve chunks from the network. type NetStore interface { // DirectUpload provides a session which can be used to push chunks directly // to the network. DirectUpload() PutterSession // Download provides a getter which can be used to download data. If the data // is found locally, its returned immediately, otherwise it is retrieved from // the network. Download(pin bool) storage.Getter // PusherFeed is the feed for direct push chunks. This can be used by the // pusher component to push out the chunks. PusherFeed() <-chan *pusher.Op } var _ Reserve = (*DB)(nil) // Reserve is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type Reserve interface { ReserveStore EvictBatch(ctx context.Context, batchID []byte) error ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error) ReserveSize() int } // ReserveIterator is a helper interface which can be used to iterate over all // the chunks in the reserve. type ReserveIterator interface { ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error } // ReserveStore is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type ReserveStore interface { ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) ReserveHas(addr swarm.Address, batchID []byte) (bool, error) ReservePutter() storage.Putter SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) ReserveLastBinIDs() ([]uint64, error) RadiusChecker } // RadiusChecker provides the radius related functionality. type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 } // LocalStore is a read-only ChunkStore. It can be used to check if chunk is known // locally, but it cannot tell what is the context of the chunk (whether it is // pinned, uploaded, etc.). type LocalStore interface { ChunkStore() storage.ReadOnlyChunkStore } // Debugger is a helper interface which can be used to debug the storer. 
type Debugger interface { DebugInfo(context.Context) (Info, error) } type memFS struct { afero.Fs } func (m *memFS) Open(path string) (fs.File, error) { return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) } type dirFS struct { basedir string } func (d *dirFS) Open(path string) (fs.File, error) { return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) } var sharkyNoOfShards = 32 var ErrDBQuit = errors.New("db quit") type closerFn func() error func (c closerFn) Close() error { return c() } func closer(closers ...io.Closer) io.Closer { return closerFn(func() error { var err error for _, closer := range closers { err = errors.Join(err, closer.Close()) } return err }) } func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) { store, err := leveldbstore.New("", nil) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err) } sharky, err := sharky.New( &memFS{Fs: afero.NewMemMapFs()}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil } // loggerName is the tree path name of the logger for this package. const loggerName = "storer" // Default options for levelDB. const ( defaultOpenFilesLimit = uint64(256) defaultBlockCacheCapacity = uint64(32 * 1024 * 1024) defaultWriteBufferSize = uint64(32 * 1024 * 1024) defaultDisableSeeksCompaction = false defaultCacheCapacity = uint64(1_000_000) defaultBgCacheWorkers = 16 indexPath = "indexstore" sharkyPath = "sharky" ) func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) { ldbBasePath := path.Join(basePath, indexPath) if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) { err := os.MkdirAll(ldbBasePath, 0777) if err != nil { return nil, err } } store, err := leveldbstore.New(path.Join(basePath, "indexstore"), &opt.Options{ OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit), BlockCacheCapacity: int(opts.LdbBlockCacheCapacity), WriteBuffer: int(opts.LdbWriteBufferSize), DisableSeeksCompaction: opts.LdbDisableSeeksCompaction, CompactionL0Trigger: 8, Filter: filter.NewBloomFilter(64), }) if err != nil { return nil, fmt.Errorf("failed creating levelDB index store: %w", err) } return store, nil } func initDiskRepository( ctx context.Context, basePath string, locker storage.ChunkLocker, opts *Options, ) (storage.Repository, io.Closer, error) { store, err := initStore(basePath, opts) if err != nil { return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err) } if opts.LdbStats.Load() != nil { go func() { ldbStats := opts.LdbStats.Load() logger := log.NewLogger(loggerName).Register() ticker := time.NewTicker(15 * time.Second) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: stats := new(leveldb.DBStats) switch err := store.DB().Stats(stats); { case errors.Is(err, leveldb.ErrClosed): return case err != nil: logger.Error(err, "snapshot levelDB stats") default: ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount)) ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds()) ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots)) ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators)) 
ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite)) ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead)) ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize)) ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount)) ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp)) ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp)) ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp)) ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp)) for i := 0; i < len(stats.LevelSizes); i++ { ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds()) } } } } }() } sharkyBasePath := path.Join(basePath, sharkyPath) if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) { err := os.Mkdir(sharkyBasePath, 0777) if err != nil { return nil, nil, err } } recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts) if err != nil { return nil, nil, fmt.Errorf("failed to recover sharky: %w", err) } sharky, err := sharky.New( &dirFS{basedir: sharkyBasePath}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) if err := txStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover index store: %w", err) } txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) if err := txChunkStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err) } return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil } func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() txnRepo, commit, rollback := repo.NewTx(ctx) c, err := cache.New(ctx, txnRepo, capacity) if err != nil { return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback())) } return c, commit() } type noopRadiusSetter struct{} func (noopRadiusSetter) SetStorageRadius(uint8) {} func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) { store, err := initStore(basePath, opts) if err != nil { return err } defer store.Close() sharkyBasePath := path.Join(basePath, sharkyPath) var sharkyRecover *sharky.Recovery // if this is a fresh node then perform an empty epoch migration if _, err := os.Stat(sharkyBasePath); err == nil { sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize) if err != nil { return err } defer sharkyRecover.Close() } logger := opts.Logger.WithName("epochmigration").Register() var rs reservePutter if opts.ReserveCapacity > 0 { rs, err = reserve.New( opts.Address, store, opts.ReserveCapacity, noopRadiusSetter{}, logger, func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error { return nil }, ) if err != nil { return err } } defer func() { if sharkyRecover != nil { retErr = errors.Join(retErr, sharkyRecover.Save()) } }() 
return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger) } const lockKeyNewSession string = "new_session" // Options provides a container to configure different things in the storer. type Options struct { // These are options related to levelDB. Currently, the underlying storage used is levelDB. LdbStats atomic.Pointer[prometheus.HistogramVec] LdbOpenFilesLimit uint64 LdbBlockCacheCapacity uint64 LdbWriteBufferSize uint64 LdbDisableSeeksCompaction bool CacheCapacity uint64 Logger log.Logger Address swarm.Address WarmupDuration time.Duration Batchstore postage.Storer ValidStamp postage.ValidStampFn RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer ReserveCapacity int ReserveWakeUpDuration time.Duration } func defaultOptions() *Options { return &Options{ LdbOpenFilesLimit: defaultOpenFilesLimit, LdbBlockCacheCapacity: defaultBlockCacheCapacity, LdbWriteBufferSize: defaultWriteBufferSize, LdbDisableSeeksCompaction: defaultDisableSeeksCompaction, CacheCapacity: defaultCacheCapacity, Logger: log.Noop, ReserveCapacity: 4_194_304, // 2^22 chunks ReserveWakeUpDuration: time.Minute * 15, } } // DB implements all the component stores described above. type DB struct { logger log.Logger metrics metrics repo storage.Repository lock *multex.Multex cacheObj *cache.Cache retrieval retrieval.Interface pusherFeed chan *pusher.Op quit chan struct{} bgCacheLimiter chan struct{} bgCacheLimiterWg sync.WaitGroup dbCloser io.Closer subscriptionsWG sync.WaitGroup events *events.Subscriber directUploadLimiter chan struct{} reserve *reserve.Reserve inFlight *util.WaitingCounter reserveBinEvents *events.Subscriber baseAddr swarm.Address batchstore postage.Storer validStamp postage.ValidStampFn setSyncerOnce sync.Once syncer Syncer opts workerOpts } type workerOpts struct { warmupDuration time.Duration wakeupDuration time.Duration } // New returns a newly constructed DB object which implements all the above // component stores. 
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { var ( repo storage.Repository err error dbCloser io.Closer ) if opts == nil { opts = defaultOptions() } if opts.Logger == nil { opts.Logger = log.Noop } lock := multex.New() metrics := newMetrics() opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats) locker := func(addr swarm.Address) func() { lock.Lock(addr.ByteString()) return func() { lock.Unlock(addr.ByteString()) } } if dirPath == "" { repo, dbCloser, err = initInmemRepository(locker) if err != nil { return nil, err } } else { // only perform migration if not done already if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil { err = performEpochMigration(ctx, dirPath, opts) if err != nil { return nil, err } } repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts) if err != nil { return nil, err } } sharkyBasePath := "" if dirPath != "" { sharkyBasePath = path.Join(dirPath, sharkyPath) } err = migration.Migrate( repo.IndexStore(), localmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()), ) if err != nil { return nil, err } cacheObj, err := initCache(ctx, opts.CacheCapacity, repo) if err != nil { return nil, err } logger := opts.Logger.WithName(loggerName).Register() db := &DB{ metrics: metrics, logger: logger, baseAddr: opts.Address, repo: repo, lock: lock, cacheObj: cacheObj, retrieval: noopRetrieval{}, pusherFeed: make(chan *pusher.Op), quit: make(chan struct{}), bgCacheLimiter: make(chan struct{}, 16), dbCloser: dbCloser, batchstore: opts.Batchstore, validStamp: opts.ValidStamp, events: events.NewSubscriber(), reserveBinEvents: events.NewSubscriber(), opts: workerOpts{ warmupDuration: opts.WarmupDuration, wakeupDuration: opts.ReserveWakeUpDuration, }, directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes), inFlight: new(util.WaitingCounter), } if db.validStamp == nil { db.validStamp = postage.ValidStamp(db.batchstore) } if opts.ReserveCapacity > 0 { rs, err := reserve.New( opts.Address, repo.IndexStore(), opts.ReserveCapacity, opts.RadiusSetter, logger, func(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error { defer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }() db.lock.Lock(cacheAccessLockKey) defer db.lock.Unlock(cacheAccessLockKey) return cacheObj.MoveFromReserve(ctx, store, addrs...) }, ) if err != nil { return nil, err } db.reserve = rs db.metrics.StorageRadius.Set(float64(rs.Radius())) db.metrics.ReserveSize.Set(float64(rs.Size())) } db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) // Cleanup any dirty state in upload and pinning stores, this could happen // in case of dirty shutdowns err = errors.Join( upload.CleanupDirty(db), pinstore.CleanupDirty(db), ) if err != nil { return nil, err } return db, nil } // Metrics returns set of prometheus collectors. func (db *DB) Metrics() []prometheus.Collector { collectors := m.PrometheusCollectorsFromFields(db.metrics) if v, ok := db.repo.(m.Collector); ok { collectors = append(collectors, v.Metrics()...) 
} return collectors } func (db *DB) Close() error { close(db.quit) bgReserveWorkersClosed := make(chan struct{}) go func() { defer close(bgReserveWorkersClosed) if c := db.inFlight.Wait(5 * time.Second); c > 0 { db.logger.Warning("db shutting down with running goroutines") } }() bgCacheWorkersClosed := make(chan struct{}) go func() { defer close(bgCacheWorkersClosed) db.bgCacheLimiterWg.Wait() }() var err error closerDone := make(chan struct{}) go func() { defer close(closerDone) err = db.dbCloser.Close() }() done := make(chan struct{}) go func() { defer close(done) <-closerDone <-bgCacheWorkersClosed <-bgReserveWorkersClosed }() select { case <-done: case <-time.After(3 * time.Second): return errors.New("storer closed with bg goroutines running") } return err } func (db *DB) SetRetrievalService(r retrieval.Interface) { db.retrieval = r } func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) { db.setSyncerOnce.Do(func() { db.syncer = s go db.startReserveWorkers(ctx, db.opts.warmupDuration, db.opts.wakeupDuration, radius) }) } type noopRetrieval struct{} func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) { return nil, storage.ErrNotFound } func (db *DB) ChunkStore() storage.ReadOnlyChunkStore { return db.repo.ChunkStore() } // Execute implements the internal.TxExecutor interface. func (db *DB) Execute(ctx context.Context, do func(internal.Storage) error) error { tx, commit, rollback := db.repo.NewTx(ctx) if err := do(tx); err != nil { return errors.Join(err, rollback()) } return commit() } type putterSession struct { storage.Putter done func(swarm.Address) error cleanup func() error } func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) } func (p *putterSession) Cleanup() error { return p.cleanup() }
random_line_split
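The PutterSession and UploadStore comments in the storer.go prefix above describe a commit-or-rollback contract: chunks are written through the session's embedded Putter, Done commits the session and binds the root reference, and Cleanup rolls back on error. A hedged sketch of how a caller might drive that contract; the uploadChunks function is illustrative only, and the TagID field on SessionInfo is an assumption based on the upload.TagItem type it aliases:

package example

import (
	"context"
	"errors"

	storer "github.com/ethersphere/bee/pkg/storer"
	"github.com/ethersphere/bee/pkg/swarm"
)

// uploadChunks writes chunks through an upload session and commits it against root.
// On the first Put error the session is rolled back with Cleanup instead.
func uploadChunks(ctx context.Context, store storer.UploadStore, chunks []swarm.Chunk, root swarm.Address) error {
	tag, err := store.NewSession() // obtain a tag ID for this upload session
	if err != nil {
		return err
	}
	session, err := store.Upload(ctx, false, tag.TagID) // pin == false; TagID field assumed
	if err != nil {
		return err
	}
	for _, ch := range chunks {
		if err := session.Put(ctx, ch); err != nil {
			return errors.Join(err, session.Cleanup())
		}
	}
	return session.Done(root) // commit and bind the root reference
}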
storer.go
// Copyright 2023 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storer import ( "context" "errors" "fmt" "io" "io/fs" "math/big" "os" "path" "path/filepath" "sync" "sync/atomic" "time" "github.com/ethersphere/bee/pkg/log" m "github.com/ethersphere/bee/pkg/metrics" "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/pusher" "github.com/ethersphere/bee/pkg/retrieval" "github.com/ethersphere/bee/pkg/sharky" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/leveldbstore" "github.com/ethersphere/bee/pkg/storage/migration" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" "github.com/ethersphere/bee/pkg/storer/internal/events" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" "github.com/ethersphere/bee/pkg/storer/internal/reserve" "github.com/ethersphere/bee/pkg/storer/internal/upload" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/afero" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/opt" "resenje.org/multex" ) // PutterSession provides a session around the storage.Putter. The session on // successful completion commits all the operations or in case of error, rolls back // the state. type PutterSession interface { storage.Putter // Done is used to close the session and optionally assign a swarm.Address to // this session. Done(swarm.Address) error // Cleanup is used to cleanup any state related to this session in case of // any error. Cleanup() error } // SessionInfo is a type which exports the storer tag object. This object // stores all the relevant information about a particular session. type SessionInfo = upload.TagItem // UploadStore is a logical component of the storer which deals with the upload // of data to swarm. type UploadStore interface { // Upload provides a PutterSession which is tied to the tagID. Optionally if // users requests to pin the data, a new pinning collection is created. Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) // NewSession can be used to obtain a tag ID to use for a new Upload session. NewSession() (SessionInfo, error) // Session will show the information about the session. Session(tagID uint64) (SessionInfo, error) // DeleteSession will delete the session info associated with the tag id. DeleteSession(tagID uint64) error // ListSessions will list all the Sessions currently being tracked. ListSessions(offset, limit int) ([]SessionInfo, error) // BatchHint will return the batch ID hint for the chunk reference if known. BatchHint(swarm.Address) ([]byte, error) } // PinStore is a logical component of the storer which deals with pinning // functionality. type PinStore interface { // NewCollection can be used to create a new PutterSession which writes a new // pinning collection. The address passed in during the Done of the session is // used as the root referencce. NewCollection(context.Context) (PutterSession, error) // DeletePin deletes all the chunks associated with the collection pointed to // by the swarm.Address passed in. 
DeletePin(context.Context, swarm.Address) error // Pins returns all the root references of pinning collections. Pins() ([]swarm.Address, error) // HasPin is a helper which checks if a collection exists with the root // reference passed in. HasPin(swarm.Address) (bool, error) } // PinIterator is a helper interface which can be used to iterate over all the // chunks in a pinning collection. type PinIterator interface { IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error } // CacheStore is a logical component of the storer that deals with cache // content. type CacheStore interface { // Lookup method provides a storage.Getter wrapped around the underlying // ChunkStore which will update cache related indexes if required on successful // lookups. Lookup() storage.Getter // Cache method provides a storage.Putter which will add the chunks to cache. // This will add the chunk to underlying store as well as new indexes which // will keep track of the chunk in the cache. Cache() storage.Putter } // NetStore is a logical component of the storer that deals with network. It will // push/retrieve chunks from the network. type NetStore interface { // DirectUpload provides a session which can be used to push chunks directly // to the network. DirectUpload() PutterSession // Download provides a getter which can be used to download data. If the data // is found locally, its returned immediately, otherwise it is retrieved from // the network. Download(pin bool) storage.Getter // PusherFeed is the feed for direct push chunks. This can be used by the // pusher component to push out the chunks. PusherFeed() <-chan *pusher.Op } var _ Reserve = (*DB)(nil) // Reserve is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type Reserve interface { ReserveStore EvictBatch(ctx context.Context, batchID []byte) error ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error) ReserveSize() int } // ReserveIterator is a helper interface which can be used to iterate over all // the chunks in the reserve. type ReserveIterator interface { ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error } // ReserveStore is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type ReserveStore interface { ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) ReserveHas(addr swarm.Address, batchID []byte) (bool, error) ReservePutter() storage.Putter SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) ReserveLastBinIDs() ([]uint64, error) RadiusChecker } // RadiusChecker provides the radius related functionality. type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 } // LocalStore is a read-only ChunkStore. It can be used to check if chunk is known // locally, but it cannot tell what is the context of the chunk (whether it is // pinned, uploaded, etc.). type LocalStore interface { ChunkStore() storage.ReadOnlyChunkStore } // Debugger is a helper interface which can be used to debug the storer. 
type Debugger interface { DebugInfo(context.Context) (Info, error) } type memFS struct { afero.Fs } func (m *memFS) Open(path string) (fs.File, error) { return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) } type dirFS struct { basedir string } func (d *dirFS) Open(path string) (fs.File, error) { return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) } var sharkyNoOfShards = 32 var ErrDBQuit = errors.New("db quit") type closerFn func() error func (c closerFn) Close() error { return c() } func closer(closers ...io.Closer) io.Closer { return closerFn(func() error { var err error for _, closer := range closers { err = errors.Join(err, closer.Close()) } return err }) } func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) { store, err := leveldbstore.New("", nil) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err) } sharky, err := sharky.New( &memFS{Fs: afero.NewMemMapFs()}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil } // loggerName is the tree path name of the logger for this package. const loggerName = "storer" // Default options for levelDB. const ( defaultOpenFilesLimit = uint64(256) defaultBlockCacheCapacity = uint64(32 * 1024 * 1024) defaultWriteBufferSize = uint64(32 * 1024 * 1024) defaultDisableSeeksCompaction = false defaultCacheCapacity = uint64(1_000_000) defaultBgCacheWorkers = 16 indexPath = "indexstore" sharkyPath = "sharky" ) func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) { ldbBasePath := path.Join(basePath, indexPath) if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) { err := os.MkdirAll(ldbBasePath, 0777) if err != nil { return nil, err } } store, err := leveldbstore.New(path.Join(basePath, "indexstore"), &opt.Options{ OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit), BlockCacheCapacity: int(opts.LdbBlockCacheCapacity), WriteBuffer: int(opts.LdbWriteBufferSize), DisableSeeksCompaction: opts.LdbDisableSeeksCompaction, CompactionL0Trigger: 8, Filter: filter.NewBloomFilter(64), }) if err != nil { return nil, fmt.Errorf("failed creating levelDB index store: %w", err) } return store, nil } func initDiskRepository( ctx context.Context, basePath string, locker storage.ChunkLocker, opts *Options, ) (storage.Repository, io.Closer, error) { store, err := initStore(basePath, opts) if err != nil { return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err) } if opts.LdbStats.Load() != nil { go func() { ldbStats := opts.LdbStats.Load() logger := log.NewLogger(loggerName).Register() ticker := time.NewTicker(15 * time.Second) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: stats := new(leveldb.DBStats) switch err := store.DB().Stats(stats); { case errors.Is(err, leveldb.ErrClosed): return case err != nil: logger.Error(err, "snapshot levelDB stats") default: ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount)) ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds()) ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots)) ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators)) 
ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite)) ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead)) ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize)) ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount)) ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp)) ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp)) ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp)) ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp)) for i := 0; i < len(stats.LevelSizes); i++ { ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds()) } } } } }() } sharkyBasePath := path.Join(basePath, sharkyPath) if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) { err := os.Mkdir(sharkyBasePath, 0777) if err != nil { return nil, nil, err } } recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts) if err != nil { return nil, nil, fmt.Errorf("failed to recover sharky: %w", err) } sharky, err := sharky.New( &dirFS{basedir: sharkyBasePath}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) if err := txStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover index store: %w", err) } txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) if err := txChunkStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err) } return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil } func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() txnRepo, commit, rollback := repo.NewTx(ctx) c, err := cache.New(ctx, txnRepo, capacity) if err != nil { return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback())) } return c, commit() } type noopRadiusSetter struct{} func (noopRadiusSetter)
(uint8) {} func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) { store, err := initStore(basePath, opts) if err != nil { return err } defer store.Close() sharkyBasePath := path.Join(basePath, sharkyPath) var sharkyRecover *sharky.Recovery // if this is a fresh node then perform an empty epoch migration if _, err := os.Stat(sharkyBasePath); err == nil { sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize) if err != nil { return err } defer sharkyRecover.Close() } logger := opts.Logger.WithName("epochmigration").Register() var rs reservePutter if opts.ReserveCapacity > 0 { rs, err = reserve.New( opts.Address, store, opts.ReserveCapacity, noopRadiusSetter{}, logger, func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error { return nil }, ) if err != nil { return err } } defer func() { if sharkyRecover != nil { retErr = errors.Join(retErr, sharkyRecover.Save()) } }() return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger) } const lockKeyNewSession string = "new_session" // Options provides a container to configure different things in the storer. type Options struct { // These are options related to levelDB. Currently, the underlying storage used is levelDB. LdbStats atomic.Pointer[prometheus.HistogramVec] LdbOpenFilesLimit uint64 LdbBlockCacheCapacity uint64 LdbWriteBufferSize uint64 LdbDisableSeeksCompaction bool CacheCapacity uint64 Logger log.Logger Address swarm.Address WarmupDuration time.Duration Batchstore postage.Storer ValidStamp postage.ValidStampFn RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer ReserveCapacity int ReserveWakeUpDuration time.Duration } func defaultOptions() *Options { return &Options{ LdbOpenFilesLimit: defaultOpenFilesLimit, LdbBlockCacheCapacity: defaultBlockCacheCapacity, LdbWriteBufferSize: defaultWriteBufferSize, LdbDisableSeeksCompaction: defaultDisableSeeksCompaction, CacheCapacity: defaultCacheCapacity, Logger: log.Noop, ReserveCapacity: 4_194_304, // 2^22 chunks ReserveWakeUpDuration: time.Minute * 15, } } // DB implements all the component stores described above. type DB struct { logger log.Logger metrics metrics repo storage.Repository lock *multex.Multex cacheObj *cache.Cache retrieval retrieval.Interface pusherFeed chan *pusher.Op quit chan struct{} bgCacheLimiter chan struct{} bgCacheLimiterWg sync.WaitGroup dbCloser io.Closer subscriptionsWG sync.WaitGroup events *events.Subscriber directUploadLimiter chan struct{} reserve *reserve.Reserve inFlight *util.WaitingCounter reserveBinEvents *events.Subscriber baseAddr swarm.Address batchstore postage.Storer validStamp postage.ValidStampFn setSyncerOnce sync.Once syncer Syncer opts workerOpts } type workerOpts struct { warmupDuration time.Duration wakeupDuration time.Duration } // New returns a newly constructed DB object which implements all the above // component stores. 
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { var ( repo storage.Repository err error dbCloser io.Closer ) if opts == nil { opts = defaultOptions() } if opts.Logger == nil { opts.Logger = log.Noop } lock := multex.New() metrics := newMetrics() opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats) locker := func(addr swarm.Address) func() { lock.Lock(addr.ByteString()) return func() { lock.Unlock(addr.ByteString()) } } if dirPath == "" { repo, dbCloser, err = initInmemRepository(locker) if err != nil { return nil, err } } else { // only perform migration if not done already if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil { err = performEpochMigration(ctx, dirPath, opts) if err != nil { return nil, err } } repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts) if err != nil { return nil, err } } sharkyBasePath := "" if dirPath != "" { sharkyBasePath = path.Join(dirPath, sharkyPath) } err = migration.Migrate( repo.IndexStore(), localmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()), ) if err != nil { return nil, err } cacheObj, err := initCache(ctx, opts.CacheCapacity, repo) if err != nil { return nil, err } logger := opts.Logger.WithName(loggerName).Register() db := &DB{ metrics: metrics, logger: logger, baseAddr: opts.Address, repo: repo, lock: lock, cacheObj: cacheObj, retrieval: noopRetrieval{}, pusherFeed: make(chan *pusher.Op), quit: make(chan struct{}), bgCacheLimiter: make(chan struct{}, 16), dbCloser: dbCloser, batchstore: opts.Batchstore, validStamp: opts.ValidStamp, events: events.NewSubscriber(), reserveBinEvents: events.NewSubscriber(), opts: workerOpts{ warmupDuration: opts.WarmupDuration, wakeupDuration: opts.ReserveWakeUpDuration, }, directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes), inFlight: new(util.WaitingCounter), } if db.validStamp == nil { db.validStamp = postage.ValidStamp(db.batchstore) } if opts.ReserveCapacity > 0 { rs, err := reserve.New( opts.Address, repo.IndexStore(), opts.ReserveCapacity, opts.RadiusSetter, logger, func(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error { defer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }() db.lock.Lock(cacheAccessLockKey) defer db.lock.Unlock(cacheAccessLockKey) return cacheObj.MoveFromReserve(ctx, store, addrs...) }, ) if err != nil { return nil, err } db.reserve = rs db.metrics.StorageRadius.Set(float64(rs.Radius())) db.metrics.ReserveSize.Set(float64(rs.Size())) } db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) // Cleanup any dirty state in upload and pinning stores, this could happen // in case of dirty shutdowns err = errors.Join( upload.CleanupDirty(db), pinstore.CleanupDirty(db), ) if err != nil { return nil, err } return db, nil } // Metrics returns set of prometheus collectors. func (db *DB) Metrics() []prometheus.Collector { collectors := m.PrometheusCollectorsFromFields(db.metrics) if v, ok := db.repo.(m.Collector); ok { collectors = append(collectors, v.Metrics()...) 
} return collectors } func (db *DB) Close() error { close(db.quit) bgReserveWorkersClosed := make(chan struct{}) go func() { defer close(bgReserveWorkersClosed) if c := db.inFlight.Wait(5 * time.Second); c > 0 { db.logger.Warning("db shutting down with running goroutines") } }() bgCacheWorkersClosed := make(chan struct{}) go func() { defer close(bgCacheWorkersClosed) db.bgCacheLimiterWg.Wait() }() var err error closerDone := make(chan struct{}) go func() { defer close(closerDone) err = db.dbCloser.Close() }() done := make(chan struct{}) go func() { defer close(done) <-closerDone <-bgCacheWorkersClosed <-bgReserveWorkersClosed }() select { case <-done: case <-time.After(3 * time.Second): return errors.New("storer closed with bg goroutines running") } return err } func (db *DB) SetRetrievalService(r retrieval.Interface) { db.retrieval = r } func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) { db.setSyncerOnce.Do(func() { db.syncer = s go db.startReserveWorkers(ctx, db.opts.warmupDuration, db.opts.wakeupDuration, radius) }) } type noopRetrieval struct{} func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) { return nil, storage.ErrNotFound } func (db *DB) ChunkStore() storage.ReadOnlyChunkStore { return db.repo.ChunkStore() } // Execute implements the internal.TxExecutor interface. func (db *DB) Execute(ctx context.Context, do func(internal.Storage) error) error { tx, commit, rollback := db.repo.NewTx(ctx) if err := do(tx); err != nil { return errors.Join(err, rollback()) } return commit() } type putterSession struct { storage.Putter done func(swarm.Address) error cleanup func() error } func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) } func (p *putterSession) Cleanup() error { return p.cleanup() }
SetStorageRadius
identifier_name
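storer.go folds its index store, sharky store and recovery closer into a single io.Closer with the closer helper, joining the individual Close errors via errors.Join so that one failure does not mask the others. A small standalone usage example of that same composition, copied in shape from the helper shown above; the store/sharky stand-ins are illustrative:

package main

import (
	"errors"
	"fmt"
	"io"
)

type closerFn func() error

func (c closerFn) Close() error { return c() }

// closer folds several io.Closers into one; every Close is attempted and the
// errors are joined, so one failure does not hide the others.
func closer(closers ...io.Closer) io.Closer {
	return closerFn(func() error {
		var err error
		for _, c := range closers {
			err = errors.Join(err, c.Close())
		}
		return err
	})
}

func main() {
	store := closerFn(func() error { return nil })
	sharky := closerFn(func() error { return errors.New("sharky: close failed") })
	fmt.Println(closer(store, sharky).Close()) // prints: sharky: close failed
}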
storer.go
// Copyright 2023 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storer import ( "context" "errors" "fmt" "io" "io/fs" "math/big" "os" "path" "path/filepath" "sync" "sync/atomic" "time" "github.com/ethersphere/bee/pkg/log" m "github.com/ethersphere/bee/pkg/metrics" "github.com/ethersphere/bee/pkg/postage" "github.com/ethersphere/bee/pkg/pusher" "github.com/ethersphere/bee/pkg/retrieval" "github.com/ethersphere/bee/pkg/sharky" "github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage/leveldbstore" "github.com/ethersphere/bee/pkg/storage/migration" "github.com/ethersphere/bee/pkg/storer/internal" "github.com/ethersphere/bee/pkg/storer/internal/cache" "github.com/ethersphere/bee/pkg/storer/internal/chunkstore" "github.com/ethersphere/bee/pkg/storer/internal/events" pinstore "github.com/ethersphere/bee/pkg/storer/internal/pinning" "github.com/ethersphere/bee/pkg/storer/internal/reserve" "github.com/ethersphere/bee/pkg/storer/internal/upload" localmigration "github.com/ethersphere/bee/pkg/storer/migration" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/afero" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/opt" "resenje.org/multex" ) // PutterSession provides a session around the storage.Putter. The session on // successful completion commits all the operations or in case of error, rolls back // the state. type PutterSession interface { storage.Putter // Done is used to close the session and optionally assign a swarm.Address to // this session. Done(swarm.Address) error // Cleanup is used to cleanup any state related to this session in case of // any error. Cleanup() error } // SessionInfo is a type which exports the storer tag object. This object // stores all the relevant information about a particular session. type SessionInfo = upload.TagItem // UploadStore is a logical component of the storer which deals with the upload // of data to swarm. type UploadStore interface { // Upload provides a PutterSession which is tied to the tagID. Optionally if // users requests to pin the data, a new pinning collection is created. Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) // NewSession can be used to obtain a tag ID to use for a new Upload session. NewSession() (SessionInfo, error) // Session will show the information about the session. Session(tagID uint64) (SessionInfo, error) // DeleteSession will delete the session info associated with the tag id. DeleteSession(tagID uint64) error // ListSessions will list all the Sessions currently being tracked. ListSessions(offset, limit int) ([]SessionInfo, error) // BatchHint will return the batch ID hint for the chunk reference if known. BatchHint(swarm.Address) ([]byte, error) } // PinStore is a logical component of the storer which deals with pinning // functionality. type PinStore interface { // NewCollection can be used to create a new PutterSession which writes a new // pinning collection. The address passed in during the Done of the session is // used as the root referencce. NewCollection(context.Context) (PutterSession, error) // DeletePin deletes all the chunks associated with the collection pointed to // by the swarm.Address passed in. 
DeletePin(context.Context, swarm.Address) error // Pins returns all the root references of pinning collections. Pins() ([]swarm.Address, error) // HasPin is a helper which checks if a collection exists with the root // reference passed in. HasPin(swarm.Address) (bool, error) } // PinIterator is a helper interface which can be used to iterate over all the // chunks in a pinning collection. type PinIterator interface { IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error } // CacheStore is a logical component of the storer that deals with cache // content. type CacheStore interface { // Lookup method provides a storage.Getter wrapped around the underlying // ChunkStore which will update cache related indexes if required on successful // lookups. Lookup() storage.Getter // Cache method provides a storage.Putter which will add the chunks to cache. // This will add the chunk to underlying store as well as new indexes which // will keep track of the chunk in the cache. Cache() storage.Putter } // NetStore is a logical component of the storer that deals with network. It will // push/retrieve chunks from the network. type NetStore interface { // DirectUpload provides a session which can be used to push chunks directly // to the network. DirectUpload() PutterSession // Download provides a getter which can be used to download data. If the data // is found locally, its returned immediately, otherwise it is retrieved from // the network. Download(pin bool) storage.Getter // PusherFeed is the feed for direct push chunks. This can be used by the // pusher component to push out the chunks. PusherFeed() <-chan *pusher.Op } var _ Reserve = (*DB)(nil) // Reserve is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type Reserve interface { ReserveStore EvictBatch(ctx context.Context, batchID []byte) error ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error) ReserveSize() int } // ReserveIterator is a helper interface which can be used to iterate over all // the chunks in the reserve. type ReserveIterator interface { ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error } // ReserveStore is a logical component of the storer that deals with reserve // content. It will implement all the core functionality required for the protocols. type ReserveStore interface { ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) ReserveHas(addr swarm.Address, batchID []byte) (bool, error) ReservePutter() storage.Putter SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) ReserveLastBinIDs() ([]uint64, error) RadiusChecker } // RadiusChecker provides the radius related functionality. type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 } // LocalStore is a read-only ChunkStore. It can be used to check if chunk is known // locally, but it cannot tell what is the context of the chunk (whether it is // pinned, uploaded, etc.). type LocalStore interface { ChunkStore() storage.ReadOnlyChunkStore } // Debugger is a helper interface which can be used to debug the storer. 
type Debugger interface { DebugInfo(context.Context) (Info, error) } type memFS struct { afero.Fs } func (m *memFS) Open(path string) (fs.File, error) { return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) } type dirFS struct { basedir string } func (d *dirFS) Open(path string) (fs.File, error) { return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644) } var sharkyNoOfShards = 32 var ErrDBQuit = errors.New("db quit") type closerFn func() error func (c closerFn) Close() error { return c() } func closer(closers ...io.Closer) io.Closer { return closerFn(func() error { var err error for _, closer := range closers { err = errors.Join(err, closer.Close()) } return err }) } func initInmemRepository(locker storage.ChunkLocker) (storage.Repository, io.Closer, error) { store, err := leveldbstore.New("", nil) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err) } sharky, err := sharky.New( &memFS{Fs: afero.NewMemMapFs()}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky), nil } // loggerName is the tree path name of the logger for this package. const loggerName = "storer" // Default options for levelDB. const ( defaultOpenFilesLimit = uint64(256) defaultBlockCacheCapacity = uint64(32 * 1024 * 1024) defaultWriteBufferSize = uint64(32 * 1024 * 1024) defaultDisableSeeksCompaction = false defaultCacheCapacity = uint64(1_000_000) defaultBgCacheWorkers = 16 indexPath = "indexstore" sharkyPath = "sharky" ) func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) { ldbBasePath := path.Join(basePath, indexPath) if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) { err := os.MkdirAll(ldbBasePath, 0777) if err != nil { return nil, err } } store, err := leveldbstore.New(path.Join(basePath, "indexstore"), &opt.Options{ OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit), BlockCacheCapacity: int(opts.LdbBlockCacheCapacity), WriteBuffer: int(opts.LdbWriteBufferSize), DisableSeeksCompaction: opts.LdbDisableSeeksCompaction, CompactionL0Trigger: 8, Filter: filter.NewBloomFilter(64), }) if err != nil { return nil, fmt.Errorf("failed creating levelDB index store: %w", err) } return store, nil } func initDiskRepository( ctx context.Context, basePath string, locker storage.ChunkLocker, opts *Options, ) (storage.Repository, io.Closer, error) { store, err := initStore(basePath, opts) if err != nil { return nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err) } if opts.LdbStats.Load() != nil { go func() { ldbStats := opts.LdbStats.Load() logger := log.NewLogger(loggerName).Register() ticker := time.NewTicker(15 * time.Second) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: stats := new(leveldb.DBStats) switch err := store.DB().Stats(stats); { case errors.Is(err, leveldb.ErrClosed): return case err != nil: logger.Error(err, "snapshot levelDB stats") default: ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount)) ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds()) ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots)) ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators)) 
ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite)) ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead)) ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize)) ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount)) ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp)) ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp)) ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp)) ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp)) for i := 0; i < len(stats.LevelSizes); i++ { ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i])) ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds()) } } } } }() } sharkyBasePath := path.Join(basePath, sharkyPath) if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) { err := os.Mkdir(sharkyBasePath, 0777) if err != nil { return nil, nil, err } } recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts) if err != nil { return nil, nil, fmt.Errorf("failed to recover sharky: %w", err) } sharky, err := sharky.New( &dirFS{basedir: sharkyBasePath}, sharkyNoOfShards, swarm.SocMaxChunkSize, ) if err != nil { return nil, nil, fmt.Errorf("failed creating sharky instance: %w", err) } txStore := leveldbstore.NewTxStore(store) if err := txStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover index store: %w", err) } txChunkStore := chunkstore.NewTxChunkStore(txStore, sharky) if err := txChunkStore.Recover(); err != nil { return nil, nil, fmt.Errorf("failed to recover chunk store: %w", err) } return storage.NewRepository(txStore, txChunkStore, locker), closer(store, sharky, recoveryCloser), nil } func initCache(ctx context.Context, capacity uint64, repo storage.Repository) (*cache.Cache, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() txnRepo, commit, rollback := repo.NewTx(ctx) c, err := cache.New(ctx, txnRepo, capacity) if err != nil { return nil, fmt.Errorf("cache.New: %w", errors.Join(err, rollback())) } return c, commit() } type noopRadiusSetter struct{} func (noopRadiusSetter) SetStorageRadius(uint8) {} func performEpochMigration(ctx context.Context, basePath string, opts *Options) (retErr error) { store, err := initStore(basePath, opts) if err != nil { return err } defer store.Close() sharkyBasePath := path.Join(basePath, sharkyPath) var sharkyRecover *sharky.Recovery // if this is a fresh node then perform an empty epoch migration if _, err := os.Stat(sharkyBasePath); err == nil { sharkyRecover, err = sharky.NewRecovery(sharkyBasePath, sharkyNoOfShards, swarm.SocMaxChunkSize) if err != nil { return err } defer sharkyRecover.Close() } logger := opts.Logger.WithName("epochmigration").Register() var rs reservePutter if opts.ReserveCapacity > 0 { rs, err = reserve.New( opts.Address, store, opts.ReserveCapacity, noopRadiusSetter{}, logger, func(_ context.Context, _ internal.Storage, _ ...swarm.Address) error { return nil }, ) if err != nil { return err } } defer func() { if sharkyRecover != nil { retErr = errors.Join(retErr, sharkyRecover.Save()) } }() 
return epochMigration(ctx, basePath, opts.StateStore, store, rs, sharkyRecover, logger) } const lockKeyNewSession string = "new_session" // Options provides a container to configure different things in the storer. type Options struct { // These are options related to levelDB. Currently, the underlying storage used is levelDB. LdbStats atomic.Pointer[prometheus.HistogramVec] LdbOpenFilesLimit uint64 LdbBlockCacheCapacity uint64 LdbWriteBufferSize uint64 LdbDisableSeeksCompaction bool CacheCapacity uint64 Logger log.Logger Address swarm.Address WarmupDuration time.Duration Batchstore postage.Storer ValidStamp postage.ValidStampFn RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer ReserveCapacity int ReserveWakeUpDuration time.Duration } func defaultOptions() *Options { return &Options{ LdbOpenFilesLimit: defaultOpenFilesLimit, LdbBlockCacheCapacity: defaultBlockCacheCapacity, LdbWriteBufferSize: defaultWriteBufferSize, LdbDisableSeeksCompaction: defaultDisableSeeksCompaction, CacheCapacity: defaultCacheCapacity, Logger: log.Noop, ReserveCapacity: 4_194_304, // 2^22 chunks ReserveWakeUpDuration: time.Minute * 15, } } // DB implements all the component stores described above. type DB struct { logger log.Logger metrics metrics repo storage.Repository lock *multex.Multex cacheObj *cache.Cache retrieval retrieval.Interface pusherFeed chan *pusher.Op quit chan struct{} bgCacheLimiter chan struct{} bgCacheLimiterWg sync.WaitGroup dbCloser io.Closer subscriptionsWG sync.WaitGroup events *events.Subscriber directUploadLimiter chan struct{} reserve *reserve.Reserve inFlight *util.WaitingCounter reserveBinEvents *events.Subscriber baseAddr swarm.Address batchstore postage.Storer validStamp postage.ValidStampFn setSyncerOnce sync.Once syncer Syncer opts workerOpts } type workerOpts struct { warmupDuration time.Duration wakeupDuration time.Duration } // New returns a newly constructed DB object which implements all the above // component stores. 
func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) { var ( repo storage.Repository err error dbCloser io.Closer ) if opts == nil { opts = defaultOptions() } if opts.Logger == nil { opts.Logger = log.Noop } lock := multex.New() metrics := newMetrics() opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats) locker := func(addr swarm.Address) func() { lock.Lock(addr.ByteString()) return func() { lock.Unlock(addr.ByteString()) } } if dirPath == "" { repo, dbCloser, err = initInmemRepository(locker) if err != nil { return nil, err } } else { // only perform migration if not done already if _, err := os.Stat(path.Join(dirPath, indexPath)); err != nil { err = performEpochMigration(ctx, dirPath, opts) if err != nil { return nil, err } } repo, dbCloser, err = initDiskRepository(ctx, dirPath, locker, opts) if err != nil { return nil, err } } sharkyBasePath := "" if dirPath != "" { sharkyBasePath = path.Join(dirPath, sharkyPath) } err = migration.Migrate( repo.IndexStore(), localmigration.AllSteps(sharkyBasePath, sharkyNoOfShards, repo.ChunkStore()), ) if err != nil { return nil, err } cacheObj, err := initCache(ctx, opts.CacheCapacity, repo) if err != nil { return nil, err } logger := opts.Logger.WithName(loggerName).Register() db := &DB{ metrics: metrics, logger: logger, baseAddr: opts.Address, repo: repo, lock: lock, cacheObj: cacheObj, retrieval: noopRetrieval{}, pusherFeed: make(chan *pusher.Op), quit: make(chan struct{}), bgCacheLimiter: make(chan struct{}, 16), dbCloser: dbCloser, batchstore: opts.Batchstore, validStamp: opts.ValidStamp, events: events.NewSubscriber(), reserveBinEvents: events.NewSubscriber(), opts: workerOpts{ warmupDuration: opts.WarmupDuration, wakeupDuration: opts.ReserveWakeUpDuration, }, directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes), inFlight: new(util.WaitingCounter), } if db.validStamp == nil { db.validStamp = postage.ValidStamp(db.batchstore) } if opts.ReserveCapacity > 0 { rs, err := reserve.New( opts.Address, repo.IndexStore(), opts.ReserveCapacity, opts.RadiusSetter, logger, func(ctx context.Context, store internal.Storage, addrs ...swarm.Address) error { defer func() { db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) }() db.lock.Lock(cacheAccessLockKey) defer db.lock.Unlock(cacheAccessLockKey) return cacheObj.MoveFromReserve(ctx, store, addrs...) }, ) if err != nil { return nil, err } db.reserve = rs db.metrics.StorageRadius.Set(float64(rs.Radius())) db.metrics.ReserveSize.Set(float64(rs.Size())) } db.metrics.CacheSize.Set(float64(db.cacheObj.Size())) // Cleanup any dirty state in upload and pinning stores, this could happen // in case of dirty shutdowns err = errors.Join( upload.CleanupDirty(db), pinstore.CleanupDirty(db), ) if err != nil { return nil, err } return db, nil } // Metrics returns set of prometheus collectors. func (db *DB) Metrics() []prometheus.Collector { collectors := m.PrometheusCollectorsFromFields(db.metrics) if v, ok := db.repo.(m.Collector); ok { collectors = append(collectors, v.Metrics()...) } return collectors } func (db *DB) Close() error
func (db *DB) SetRetrievalService(r retrieval.Interface) { db.retrieval = r } func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) { db.setSyncerOnce.Do(func() { db.syncer = s go db.startReserveWorkers(ctx, db.opts.warmupDuration, db.opts.wakeupDuration, radius) }) } type noopRetrieval struct{} func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) { return nil, storage.ErrNotFound } func (db *DB) ChunkStore() storage.ReadOnlyChunkStore { return db.repo.ChunkStore() } // Execute implements the internal.TxExecutor interface. func (db *DB) Execute(ctx context.Context, do func(internal.Storage) error) error { tx, commit, rollback := db.repo.NewTx(ctx) if err := do(tx); err != nil { return errors.Join(err, rollback()) } return commit() } type putterSession struct { storage.Putter done func(swarm.Address) error cleanup func() error } func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) } func (p *putterSession) Cleanup() error { return p.cleanup() }
{ close(db.quit) bgReserveWorkersClosed := make(chan struct{}) go func() { defer close(bgReserveWorkersClosed) if c := db.inFlight.Wait(5 * time.Second); c > 0 { db.logger.Warning("db shutting down with running goroutines") } }() bgCacheWorkersClosed := make(chan struct{}) go func() { defer close(bgCacheWorkersClosed) db.bgCacheLimiterWg.Wait() }() var err error closerDone := make(chan struct{}) go func() { defer close(closerDone) err = db.dbCloser.Close() }() done := make(chan struct{}) go func() { defer close(done) <-closerDone <-bgCacheWorkersClosed <-bgReserveWorkersClosed }() select { case <-done: case <-time.After(3 * time.Second): return errors.New("storer closed with bg goroutines running") } return err }
identifier_body
userrole.go
package userrolemod import ( "database/sql" "fmt" "strconv" "strings" "sync" "time" "github.com/intrntsrfr/meido/base" "github.com/intrntsrfr/meido/database" "github.com/intrntsrfr/meido/utils" "go.uber.org/zap" "github.com/bwmarrin/discordgo" "github.com/intrntsrfr/owo" ) type UserRoleMod struct { sync.Mutex name string commands map[string]*base.ModCommand allowedTypes base.MessageType allowDMs bool bot *base.Bot db *database.DB owo *owo.Client log *zap.Logger } func New(b *base.Bot, db *database.DB, owo *owo.Client, log *zap.Logger) base.Mod { return &UserRoleMod{ name: "UserRoles", commands: make(map[string]*base.ModCommand), allowedTypes: base.MessageTypeCreate, allowDMs: false, bot: b, db: db, owo: owo, log: log, } } func (m *UserRoleMod) Name() string { return m.name } func (m *UserRoleMod) Passives() []*base.ModPassive { return []*base.ModPassive{}
func (m *UserRoleMod) AllowedTypes() base.MessageType { return m.allowedTypes } func (m *UserRoleMod) AllowDMs() bool { return m.allowDMs } func (m *UserRoleMod) Hook() error { m.bot.Discord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) { refreshTicker := time.NewTicker(time.Hour) go func() { for range refreshTicker.C { for _, g := range m.bot.Discord.Guilds() { if g.Unavailable { continue } var userRoles []*database.UserRole err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID) if err != nil { continue } for _, ur := range userRoles { hasRole := false for _, gr := range g.Roles { if gr.ID == ur.RoleID { hasRole = true break } } if !hasRole { m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID) } } } } }() }) m.RegisterCommand(NewSetUserRoleCommand(m)) m.RegisterCommand(NewMyRoleCommand(m)) //m.RegisterCommand(NewListUserRolesCommand(m)) return nil } func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) { m.Lock() defer m.Unlock() if _, ok := m.commands[cmd.Name]; ok { panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name())) } m.commands[cmd.Name] = cmd } func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "setuserrole", Description: "Binds, unbinds or changes a userrole bind to a user", Triggers: []string{"m?setuserrole"}, Usage: "m?setuserrole 1231231231231 cool role", Cooldown: 3, RequiredPerms: discordgo.PermissionManageRoles, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.setuserroleCommand, } } func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 3 { return } targetMember, err := msg.GetMemberAtArg(1) if err != nil { msg.Reply("could not find that user") return } if targetMember.User.Bot { msg.Reply("Bots dont get to join the fun") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply(err.Error()) return } var selectedRole *discordgo.Role for _, role := range g.Roles { if role.ID == msg.Args()[2] { selectedRole = role } else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) { selectedRole = role } } if selectedRole == nil { msg.Reply("Could not find that role!") return } userRole := &database.UserRole{} err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID) switch err { case nil: if selectedRole.ID == userRole.RoleID { m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String())) } else { m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID) msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name)) } case sql.ErrNoRows: m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String())) default: fmt.Println(err) msg.Reply("there was an error, please try again") } } func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "myrole", Description: "Displays a users bound role, or lets the user change the name or color of their bound role", Triggers: []string{"m?myrole"}, 
Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff", Cooldown: 3, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.myroleCommand, } } func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 1 { return } var ( err error oldRole *discordgo.Role target *discordgo.Member ) g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred") return } switch la := msg.LenArgs(); { case la > 2: if msg.Args()[1] != "name" && msg.Args()[1] != "color" { return } if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow { msg.Reply("I am missing 'manage roles' permissions!") return } ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID()) if err != nil && err != sql.ErrNoRows { m.log.Error("error fetching user role", zap.Error(err)) msg.Reply("there was an error, please try again") return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } for _, role := range g.Roles { if role.ID == ur.RoleID { oldRole = role } } if oldRole == nil { msg.Reply("couldnt find role") return } if msg.Args()[1] == "name" { newName := strings.Join(msg.RawArgs()[2:], " ") _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical}) return } msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: oldRole.Color, Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName), } msg.ReplyEmbed(embed) } else if msg.Args()[1] == "color" { clr := msg.Args()[2] if strings.HasPrefix(clr, "#") { clr = clr[1:] } color, err := strconv.ParseInt(clr, 16, 64) if err != nil || color < 0 || color > 0xFFFFFF { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical}) return } _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: int(color), //Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)), Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)), } msg.ReplyEmbed(embed) } return case la == 1: target = msg.Member() case la == 2: target, err = msg.GetMemberAtArg(1) if err != nil { msg.Reply("Could not find that user") return } default: return } if target == nil { return } ur, err := m.db.GetUserRole(msg.GuildID(), target.User.ID) if err != nil && err != sql.ErrNoRows { msg.Reply("there was an error, please try again") m.log.Error("error fetching user role", zap.Error(err)) return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } var customRole *discordgo.Role for i := range g.Roles { role := g.Roles[i] if role.ID == ur.RoleID { customRole = role } } if customRole 
== nil { msg.Reply("the custom role is broken, wait for someone to fix it or try setting a new userrole") return } embed := &discordgo.MessageEmbed{ Color: customRole.Color, Title: fmt.Sprintf("Custom role for %v", target.User.String()), Fields: []*discordgo.MessageEmbedField{ { Name: "Name", Value: customRole.Name, Inline: true, }, { Name: "Color", Value: fmt.Sprintf("#" + fmt.Sprintf("%06X", customRole.Color)), Inline: true, }, }, } msg.ReplyEmbed(embed) } /* func NewListUserRolesCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "listuserroles", Description: "Returns a list of the user roles that are in the server, displays if some users still are in the server or not", Triggers: []string{"m?listuserroles"}, Usage: "m?listuserroles", Cooldown: 30, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.listuserrolesCommand, } } func (m *UserRoleMod) listuserrolesCommand(msg *base.DiscordMessage) { if msg.LenArgs() != 1 { return } var userRoles []*database.UserRole err := m.db.Select(&userRoles, "SELECT role_id, user_id FROM userroles WHERE guild_id=$1;", msg.Message.GuildID) if err != nil { msg.Reply("there was an error, please try again") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred, please try again") return } text := fmt.Sprintf("Userroles in %v\n\n", g.Name) count := 0 for _, ur := range userRoles { role, err := msg.Discord.Role(g.ID, ur.RoleID) if err != nil { continue } mem, err := msg.Discord.Member(g.ID, ur.UserID) if err != nil { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v - User no longer in guild.\n", count, role.Name, role.ID, ur.UserID) } else { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v (%v)\n", count, role.Name, role.ID, mem.User.String(), mem.User.ID) } count++ } link, err := m.owo.Upload(text) if err != nil { msg.Reply("Error getting user roles.") return } msg.Reply(fmt.Sprintf("User roles in %v\n%v", g.Name, link)) } */
} func (m *UserRoleMod) Commands() map[string]*base.ModCommand { return m.commands }
random_line_split
userrole.go
package userrolemod import ( "database/sql" "fmt" "strconv" "strings" "sync" "time" "github.com/intrntsrfr/meido/base" "github.com/intrntsrfr/meido/database" "github.com/intrntsrfr/meido/utils" "go.uber.org/zap" "github.com/bwmarrin/discordgo" "github.com/intrntsrfr/owo" ) type UserRoleMod struct { sync.Mutex name string commands map[string]*base.ModCommand allowedTypes base.MessageType allowDMs bool bot *base.Bot db *database.DB owo *owo.Client log *zap.Logger } func New(b *base.Bot, db *database.DB, owo *owo.Client, log *zap.Logger) base.Mod { return &UserRoleMod{ name: "UserRoles", commands: make(map[string]*base.ModCommand), allowedTypes: base.MessageTypeCreate, allowDMs: false, bot: b, db: db, owo: owo, log: log, } } func (m *UserRoleMod) Name() string { return m.name } func (m *UserRoleMod) Passives() []*base.ModPassive { return []*base.ModPassive{} } func (m *UserRoleMod) Commands() map[string]*base.ModCommand { return m.commands } func (m *UserRoleMod) AllowedTypes() base.MessageType { return m.allowedTypes } func (m *UserRoleMod) AllowDMs() bool { return m.allowDMs } func (m *UserRoleMod) Hook() error { m.bot.Discord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) { refreshTicker := time.NewTicker(time.Hour) go func() { for range refreshTicker.C { for _, g := range m.bot.Discord.Guilds() { if g.Unavailable { continue } var userRoles []*database.UserRole err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID) if err != nil { continue } for _, ur := range userRoles { hasRole := false for _, gr := range g.Roles { if gr.ID == ur.RoleID { hasRole = true break } } if !hasRole { m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID) } } } } }() }) m.RegisterCommand(NewSetUserRoleCommand(m)) m.RegisterCommand(NewMyRoleCommand(m)) //m.RegisterCommand(NewListUserRolesCommand(m)) return nil } func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) { m.Lock() defer m.Unlock() if _, ok := m.commands[cmd.Name]; ok { panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name())) } m.commands[cmd.Name] = cmd } func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand
func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 3 { return } targetMember, err := msg.GetMemberAtArg(1) if err != nil { msg.Reply("could not find that user") return } if targetMember.User.Bot { msg.Reply("Bots dont get to join the fun") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply(err.Error()) return } var selectedRole *discordgo.Role for _, role := range g.Roles { if role.ID == msg.Args()[2] { selectedRole = role } else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) { selectedRole = role } } if selectedRole == nil { msg.Reply("Could not find that role!") return } userRole := &database.UserRole{} err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID) switch err { case nil: if selectedRole.ID == userRole.RoleID { m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String())) } else { m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID) msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name)) } case sql.ErrNoRows: m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String())) default: fmt.Println(err) msg.Reply("there was an error, please try again") } } func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "myrole", Description: "Displays a users bound role, or lets the user change the name or color of their bound role", Triggers: []string{"m?myrole"}, Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff", Cooldown: 3, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.myroleCommand, } } func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 1 { return } var ( err error oldRole *discordgo.Role target *discordgo.Member ) g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred") return } switch la := msg.LenArgs(); { case la > 2: if msg.Args()[1] != "name" && msg.Args()[1] != "color" { return } if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow { msg.Reply("I am missing 'manage roles' permissions!") return } ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID()) if err != nil && err != sql.ErrNoRows { m.log.Error("error fetching user role", zap.Error(err)) msg.Reply("there was an error, please try again") return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } for _, role := range g.Roles { if role.ID == ur.RoleID { oldRole = role } } if oldRole == nil { msg.Reply("couldnt find role") return } if msg.Args()[1] == "name" { newName := strings.Join(msg.RawArgs()[2:], " ") _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: 
"Missing permissions.", Color: utils.ColorCritical}) return } msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: oldRole.Color, Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName), } msg.ReplyEmbed(embed) } else if msg.Args()[1] == "color" { clr := msg.Args()[2] if strings.HasPrefix(clr, "#") { clr = clr[1:] } color, err := strconv.ParseInt(clr, 16, 64) if err != nil || color < 0 || color > 0xFFFFFF { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical}) return } _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: int(color), //Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)), Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)), } msg.ReplyEmbed(embed) } return case la == 1: target = msg.Member() case la == 2: target, err = msg.GetMemberAtArg(1) if err != nil { msg.Reply("Could not find that user") return } default: return } if target == nil { return } ur, err := m.db.GetUserRole(msg.GuildID(), target.User.ID) if err != nil && err != sql.ErrNoRows { msg.Reply("there was an error, please try again") m.log.Error("error fetching user role", zap.Error(err)) return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } var customRole *discordgo.Role for i := range g.Roles { role := g.Roles[i] if role.ID == ur.RoleID { customRole = role } } if customRole == nil { msg.Reply("the custom role is broken, wait for someone to fix it or try setting a new userrole") return } embed := &discordgo.MessageEmbed{ Color: customRole.Color, Title: fmt.Sprintf("Custom role for %v", target.User.String()), Fields: []*discordgo.MessageEmbedField{ { Name: "Name", Value: customRole.Name, Inline: true, }, { Name: "Color", Value: fmt.Sprintf("#" + fmt.Sprintf("%06X", customRole.Color)), Inline: true, }, }, } msg.ReplyEmbed(embed) } /* func NewListUserRolesCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "listuserroles", Description: "Returns a list of the user roles that are in the server, displays if some users still are in the server or not", Triggers: []string{"m?listuserroles"}, Usage: "m?listuserroles", Cooldown: 30, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.listuserrolesCommand, } } func (m *UserRoleMod) listuserrolesCommand(msg *base.DiscordMessage) { if msg.LenArgs() != 1 { return } var userRoles []*database.UserRole err := m.db.Select(&userRoles, "SELECT role_id, user_id FROM userroles WHERE guild_id=$1;", msg.Message.GuildID) if err != nil { msg.Reply("there was an error, please try again") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred, please try again") return } text := fmt.Sprintf("Userroles in %v\n\n", g.Name) count := 0 for _, ur := range userRoles { role, err := msg.Discord.Role(g.ID, ur.RoleID) if err != nil { continue } mem, err := msg.Discord.Member(g.ID, 
ur.UserID) if err != nil { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v - User no longer in guild.\n", count, role.Name, role.ID, ur.UserID) } else { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v (%v)\n", count, role.Name, role.ID, mem.User.String(), mem.User.ID) } count++ } link, err := m.owo.Upload(text) if err != nil { msg.Reply("Error getting user roles.") return } msg.Reply(fmt.Sprintf("User roles in %v\n%v", g.Name, link)) } */
{ return &base.ModCommand{ Mod: m, Name: "setuserrole", Description: "Binds, unbinds or changes a userrole bind to a user", Triggers: []string{"m?setuserrole"}, Usage: "m?setuserrole 1231231231231 cool role", Cooldown: 3, RequiredPerms: discordgo.PermissionManageRoles, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.setuserroleCommand, } }
identifier_body
userrole.go
package userrolemod import ( "database/sql" "fmt" "strconv" "strings" "sync" "time" "github.com/intrntsrfr/meido/base" "github.com/intrntsrfr/meido/database" "github.com/intrntsrfr/meido/utils" "go.uber.org/zap" "github.com/bwmarrin/discordgo" "github.com/intrntsrfr/owo" ) type UserRoleMod struct { sync.Mutex name string commands map[string]*base.ModCommand allowedTypes base.MessageType allowDMs bool bot *base.Bot db *database.DB owo *owo.Client log *zap.Logger } func New(b *base.Bot, db *database.DB, owo *owo.Client, log *zap.Logger) base.Mod { return &UserRoleMod{ name: "UserRoles", commands: make(map[string]*base.ModCommand), allowedTypes: base.MessageTypeCreate, allowDMs: false, bot: b, db: db, owo: owo, log: log, } } func (m *UserRoleMod) Name() string { return m.name } func (m *UserRoleMod) Passives() []*base.ModPassive { return []*base.ModPassive{} } func (m *UserRoleMod) Commands() map[string]*base.ModCommand { return m.commands } func (m *UserRoleMod) AllowedTypes() base.MessageType { return m.allowedTypes } func (m *UserRoleMod)
() bool { return m.allowDMs } func (m *UserRoleMod) Hook() error { m.bot.Discord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) { refreshTicker := time.NewTicker(time.Hour) go func() { for range refreshTicker.C { for _, g := range m.bot.Discord.Guilds() { if g.Unavailable { continue } var userRoles []*database.UserRole err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID) if err != nil { continue } for _, ur := range userRoles { hasRole := false for _, gr := range g.Roles { if gr.ID == ur.RoleID { hasRole = true break } } if !hasRole { m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID) } } } } }() }) m.RegisterCommand(NewSetUserRoleCommand(m)) m.RegisterCommand(NewMyRoleCommand(m)) //m.RegisterCommand(NewListUserRolesCommand(m)) return nil } func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) { m.Lock() defer m.Unlock() if _, ok := m.commands[cmd.Name]; ok { panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name())) } m.commands[cmd.Name] = cmd } func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "setuserrole", Description: "Binds, unbinds or changes a userrole bind to a user", Triggers: []string{"m?setuserrole"}, Usage: "m?setuserrole 1231231231231 cool role", Cooldown: 3, RequiredPerms: discordgo.PermissionManageRoles, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.setuserroleCommand, } } func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 3 { return } targetMember, err := msg.GetMemberAtArg(1) if err != nil { msg.Reply("could not find that user") return } if targetMember.User.Bot { msg.Reply("Bots dont get to join the fun") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply(err.Error()) return } var selectedRole *discordgo.Role for _, role := range g.Roles { if role.ID == msg.Args()[2] { selectedRole = role } else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) { selectedRole = role } } if selectedRole == nil { msg.Reply("Could not find that role!") return } userRole := &database.UserRole{} err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID) switch err { case nil: if selectedRole.ID == userRole.RoleID { m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String())) } else { m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID) msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name)) } case sql.ErrNoRows: m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String())) default: fmt.Println(err) msg.Reply("there was an error, please try again") } } func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "myrole", Description: "Displays a users bound role, or lets the user change the name or color of their bound role", Triggers: []string{"m?myrole"}, Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff", Cooldown: 3, 
RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.myroleCommand, } } func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 1 { return } var ( err error oldRole *discordgo.Role target *discordgo.Member ) g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred") return } switch la := msg.LenArgs(); { case la > 2: if msg.Args()[1] != "name" && msg.Args()[1] != "color" { return } if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow { msg.Reply("I am missing 'manage roles' permissions!") return } ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID()) if err != nil && err != sql.ErrNoRows { m.log.Error("error fetching user role", zap.Error(err)) msg.Reply("there was an error, please try again") return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } for _, role := range g.Roles { if role.ID == ur.RoleID { oldRole = role } } if oldRole == nil { msg.Reply("couldnt find role") return } if msg.Args()[1] == "name" { newName := strings.Join(msg.RawArgs()[2:], " ") _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical}) return } msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: oldRole.Color, Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName), } msg.ReplyEmbed(embed) } else if msg.Args()[1] == "color" { clr := msg.Args()[2] if strings.HasPrefix(clr, "#") { clr = clr[1:] } color, err := strconv.ParseInt(clr, 16, 64) if err != nil || color < 0 || color > 0xFFFFFF { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical}) return } _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: int(color), //Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)), Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)), } msg.ReplyEmbed(embed) } return case la == 1: target = msg.Member() case la == 2: target, err = msg.GetMemberAtArg(1) if err != nil { msg.Reply("Could not find that user") return } default: return } if target == nil { return } ur, err := m.db.GetUserRole(msg.GuildID(), target.User.ID) if err != nil && err != sql.ErrNoRows { msg.Reply("there was an error, please try again") m.log.Error("error fetching user role", zap.Error(err)) return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } var customRole *discordgo.Role for i := range g.Roles { role := g.Roles[i] if role.ID == ur.RoleID { customRole = role } } if customRole == nil { msg.Reply("the custom role is broken, wait for someone to fix it or try setting a new 
userrole") return } embed := &discordgo.MessageEmbed{ Color: customRole.Color, Title: fmt.Sprintf("Custom role for %v", target.User.String()), Fields: []*discordgo.MessageEmbedField{ { Name: "Name", Value: customRole.Name, Inline: true, }, { Name: "Color", Value: fmt.Sprintf("#" + fmt.Sprintf("%06X", customRole.Color)), Inline: true, }, }, } msg.ReplyEmbed(embed) } /* func NewListUserRolesCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "listuserroles", Description: "Returns a list of the user roles that are in the server, displays if some users still are in the server or not", Triggers: []string{"m?listuserroles"}, Usage: "m?listuserroles", Cooldown: 30, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.listuserrolesCommand, } } func (m *UserRoleMod) listuserrolesCommand(msg *base.DiscordMessage) { if msg.LenArgs() != 1 { return } var userRoles []*database.UserRole err := m.db.Select(&userRoles, "SELECT role_id, user_id FROM userroles WHERE guild_id=$1;", msg.Message.GuildID) if err != nil { msg.Reply("there was an error, please try again") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred, please try again") return } text := fmt.Sprintf("Userroles in %v\n\n", g.Name) count := 0 for _, ur := range userRoles { role, err := msg.Discord.Role(g.ID, ur.RoleID) if err != nil { continue } mem, err := msg.Discord.Member(g.ID, ur.UserID) if err != nil { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v - User no longer in guild.\n", count, role.Name, role.ID, ur.UserID) } else { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v (%v)\n", count, role.Name, role.ID, mem.User.String(), mem.User.ID) } count++ } link, err := m.owo.Upload(text) if err != nil { msg.Reply("Error getting user roles.") return } msg.Reply(fmt.Sprintf("User roles in %v\n%v", g.Name, link)) } */
AllowDMs
identifier_name
userrole.go
package userrolemod import ( "database/sql" "fmt" "strconv" "strings" "sync" "time" "github.com/intrntsrfr/meido/base" "github.com/intrntsrfr/meido/database" "github.com/intrntsrfr/meido/utils" "go.uber.org/zap" "github.com/bwmarrin/discordgo" "github.com/intrntsrfr/owo" ) type UserRoleMod struct { sync.Mutex name string commands map[string]*base.ModCommand allowedTypes base.MessageType allowDMs bool bot *base.Bot db *database.DB owo *owo.Client log *zap.Logger } func New(b *base.Bot, db *database.DB, owo *owo.Client, log *zap.Logger) base.Mod { return &UserRoleMod{ name: "UserRoles", commands: make(map[string]*base.ModCommand), allowedTypes: base.MessageTypeCreate, allowDMs: false, bot: b, db: db, owo: owo, log: log, } } func (m *UserRoleMod) Name() string { return m.name } func (m *UserRoleMod) Passives() []*base.ModPassive { return []*base.ModPassive{} } func (m *UserRoleMod) Commands() map[string]*base.ModCommand { return m.commands } func (m *UserRoleMod) AllowedTypes() base.MessageType { return m.allowedTypes } func (m *UserRoleMod) AllowDMs() bool { return m.allowDMs } func (m *UserRoleMod) Hook() error { m.bot.Discord.Sess.AddHandler(func(s *discordgo.Session, r *discordgo.Ready) { refreshTicker := time.NewTicker(time.Hour) go func() { for range refreshTicker.C { for _, g := range m.bot.Discord.Guilds() { if g.Unavailable { continue } var userRoles []*database.UserRole err := m.db.Get(&userRoles, "SELECT * FROM user_role WHERE guild_id=$1", g.ID) if err != nil { continue } for _, ur := range userRoles { hasRole := false for _, gr := range g.Roles { if gr.ID == ur.RoleID { hasRole = true break } } if !hasRole { m.db.Exec("DELETE FROM user_role WHERE uid=$1", ur.UID) } } } } }() }) m.RegisterCommand(NewSetUserRoleCommand(m)) m.RegisterCommand(NewMyRoleCommand(m)) //m.RegisterCommand(NewListUserRolesCommand(m)) return nil } func (m *UserRoleMod) RegisterCommand(cmd *base.ModCommand) { m.Lock() defer m.Unlock() if _, ok := m.commands[cmd.Name]; ok { panic(fmt.Sprintf("command '%v' already exists in %v", cmd.Name, m.Name())) } m.commands[cmd.Name] = cmd } func NewSetUserRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "setuserrole", Description: "Binds, unbinds or changes a userrole bind to a user", Triggers: []string{"m?setuserrole"}, Usage: "m?setuserrole 1231231231231 cool role", Cooldown: 3, RequiredPerms: discordgo.PermissionManageRoles, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.setuserroleCommand, } } func (m *UserRoleMod) setuserroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 3 { return } targetMember, err := msg.GetMemberAtArg(1) if err != nil { msg.Reply("could not find that user") return } if targetMember.User.Bot { msg.Reply("Bots dont get to join the fun") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply(err.Error()) return } var selectedRole *discordgo.Role for _, role := range g.Roles { if role.ID == msg.Args()[2] { selectedRole = role } else if strings.ToLower(role.Name) == strings.ToLower(strings.Join(msg.Args()[2:], " ")) { selectedRole = role } } if selectedRole == nil { msg.Reply("Could not find that role!") return } userRole := &database.UserRole{} err = m.db.Get(userRole, "SELECT * FROM user_role WHERE guild_id=$1 AND user_id=$2", g.ID, targetMember.User.ID) switch err { case nil: if selectedRole.ID == userRole.RoleID { m.db.Exec("DELETE FROM user_role WHERE guild_id=$1 AND user_id=$2 AND role_id=$3;", g.ID, 
targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Unbound role **%v** from user **%v**", selectedRole.Name, targetMember.User.String())) } else { m.db.Exec("UPDATE user_role SET role_id=$1 WHERE guild_id=$2 AND user_id=$3", selectedRole.ID, g.ID, targetMember.User.ID) msg.Reply(fmt.Sprintf("Updated userrole for **%v** to **%v**", targetMember.User.String(), selectedRole.Name)) } case sql.ErrNoRows: m.db.Exec("INSERT INTO user_role(guild_id, user_id, role_id) VALUES($1, $2, $3);", g.ID, targetMember.User.ID, selectedRole.ID) msg.Reply(fmt.Sprintf("Bound role **%v** to user **%v**", selectedRole.Name, targetMember.User.String())) default: fmt.Println(err) msg.Reply("there was an error, please try again") } } func NewMyRoleCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "myrole", Description: "Displays a users bound role, or lets the user change the name or color of their bound role", Triggers: []string{"m?myrole"}, Usage: "m?myrole | m?myrole 123123123123 | m?myrole color c0ffee | m?myrole name jeff", Cooldown: 3, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.myroleCommand, } } func (m *UserRoleMod) myroleCommand(msg *base.DiscordMessage) { if msg.LenArgs() < 1 { return } var ( err error oldRole *discordgo.Role target *discordgo.Member ) g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred") return } switch la := msg.LenArgs(); { case la > 2: if msg.Args()[1] != "name" && msg.Args()[1] != "color" { return } if allow, err := msg.Discord.HasPermissions(msg.Message.ChannelID, discordgo.PermissionManageRoles); err != nil || !allow { msg.Reply("I am missing 'manage roles' permissions!") return } ur, err := m.db.GetUserRole(msg.GuildID(), msg.AuthorID()) if err != nil && err != sql.ErrNoRows { m.log.Error("error fetching user role", zap.Error(err)) msg.Reply("there was an error, please try again") return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } for _, role := range g.Roles { if role.ID == ur.RoleID { oldRole = role } } if oldRole == nil { msg.Reply("couldnt find role") return } if msg.Args()[1] == "name"
else if msg.Args()[1] == "color" { clr := msg.Args()[2] if strings.HasPrefix(clr, "#") { clr = clr[1:] } color, err := strconv.ParseInt(clr, 16, 64) if err != nil || color < 0 || color > 0xFFFFFF { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Invalid color code.", Color: utils.ColorCritical}) return } _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, oldRole.Name, int(color), oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error(), Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: int(color), //Description: fmt.Sprintf("Color changed from #%v to #%v", fmt.Sprintf("%06X", oldRole.Color), fmt.Sprintf("%06X", color)), Description: fmt.Sprintf("Color changed from #%v to #%v", strconv.FormatInt(int64(oldRole.Color), 16), strconv.FormatInt(color, 16)), // fmt.Sprintf("%06X", color)), } msg.ReplyEmbed(embed) } return case la == 1: target = msg.Member() case la == 2: target, err = msg.GetMemberAtArg(1) if err != nil { msg.Reply("Could not find that user") return } default: return } if target == nil { return } ur, err := m.db.GetUserRole(msg.GuildID(), target.User.ID) if err != nil && err != sql.ErrNoRows { msg.Reply("there was an error, please try again") m.log.Error("error fetching user role", zap.Error(err)) return } else if err == sql.ErrNoRows { msg.Reply("No custom role set.") return } var customRole *discordgo.Role for i := range g.Roles { role := g.Roles[i] if role.ID == ur.RoleID { customRole = role } } if customRole == nil { msg.Reply("the custom role is broken, wait for someone to fix it or try setting a new userrole") return } embed := &discordgo.MessageEmbed{ Color: customRole.Color, Title: fmt.Sprintf("Custom role for %v", target.User.String()), Fields: []*discordgo.MessageEmbedField{ { Name: "Name", Value: customRole.Name, Inline: true, }, { Name: "Color", Value: fmt.Sprintf("#" + fmt.Sprintf("%06X", customRole.Color)), Inline: true, }, }, } msg.ReplyEmbed(embed) } /* func NewListUserRolesCommand(m *UserRoleMod) *base.ModCommand { return &base.ModCommand{ Mod: m, Name: "listuserroles", Description: "Returns a list of the user roles that are in the server, displays if some users still are in the server or not", Triggers: []string{"m?listuserroles"}, Usage: "m?listuserroles", Cooldown: 30, RequiredPerms: 0, RequiresOwner: false, AllowedTypes: base.MessageTypeCreate, AllowDMs: false, Enabled: true, Run: m.listuserrolesCommand, } } func (m *UserRoleMod) listuserrolesCommand(msg *base.DiscordMessage) { if msg.LenArgs() != 1 { return } var userRoles []*database.UserRole err := m.db.Select(&userRoles, "SELECT role_id, user_id FROM userroles WHERE guild_id=$1;", msg.Message.GuildID) if err != nil { msg.Reply("there was an error, please try again") return } g, err := msg.Discord.Guild(msg.Message.GuildID) if err != nil { msg.Reply("some error occurred, please try again") return } text := fmt.Sprintf("Userroles in %v\n\n", g.Name) count := 0 for _, ur := range userRoles { role, err := msg.Discord.Role(g.ID, ur.RoleID) if err != nil { continue } mem, err := msg.Discord.Member(g.ID, ur.UserID) if err != nil { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v - User no longer in guild.\n", count, role.Name, role.ID, ur.UserID) } else { text += fmt.Sprintf("Role #%v: %v (%v) | Bound user: %v (%v)\n", count, role.Name, role.ID, mem.User.String(), mem.User.ID) } count++ } link, err := m.owo.Upload(text) if err != nil { msg.Reply("Error getting 
user roles.") return } msg.Reply(fmt.Sprintf("User roles in %v\n%v", g.Name, link)) } */
{ newName := strings.Join(msg.RawArgs()[2:], " ") _, err = msg.Discord.Sess.GuildRoleEdit(g.ID, oldRole.ID, newName, oldRole.Color, oldRole.Hoist, oldRole.Permissions, oldRole.Mentionable) if err != nil { if strings.Contains(err.Error(), strconv.Itoa(discordgo.ErrCodeMissingPermissions)) { msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Missing permissions.", Color: utils.ColorCritical}) return } msg.ReplyEmbed(&discordgo.MessageEmbed{Description: "Some error occurred: `" + err.Error() + "`.", Color: utils.ColorCritical}) return } embed := &discordgo.MessageEmbed{ Color: oldRole.Color, Description: fmt.Sprintf("Role name changed from %v to %v", oldRole.Name, newName), } msg.ReplyEmbed(embed) }
conditional_block
threejs_adapter.js
/** * A Library Adapter Layer for ThreeJS */ CONSTRUCTORS_DECORATED = false; GUID_PROPERTY_ADDED = false; /** * Constructor for the ThreeJS Library Adapter Layer. * @param {object} config - A configuration object. Must contain a 'shareConf' * function. */ ThreeAdapter = function (config) { this.config = config; this.allLocalObjects = {}; this.nextIncrementalGuid = 0; } /** * */ ThreeAdapter.prototype.initialize = function (mulwapp) { this.addGuidProperty(); this.setupConstructorInterceptors(mulwapp, this.getConstructors()); } /** * */ ThreeAdapter.prototype.addGuidProperty = function () { if (GUID_PROPERTY_ADDED) throw 'GUID property has already been added'; GUID_PROPERTY_ADDED = true; if ('mulwapp_guid' in THREE.Object3D.prototype) { return; } var propobj = { get: function () { if (this._mulwapp_guid == undefined) { this._mulwapp_guid = '' + Math.random(); } return this._mulwapp_guid; }, set: function (x) { this._mulwapp_guid = x; } }; Object.defineProperty(THREE.Object3D.prototype, 'mulwapp_guid', propobj); Object.defineProperty(THREE.Geometry.prototype, 'mulwapp_guid', propobj); Object.defineProperty(THREE.Material.prototype, 'mulwapp_guid', propobj); } /** * Fetches dependencies out of the create spec and returns them as a list * @param {object} a create spec * @return {Array} a list of object dependencies */ ThreeAdapter.prototype.getDependencies = function (createSpec) { var filter = function (arg) { return !arg.primitive; } var map = function (arg) { return arg.value; } return createSpec.args.filter(filter).map(map); } /** * Calculates the diff model * @param {THREE.Scene} root - The scene graph root * @returns The diff model of the current scene graph */ ThreeAdapter.prototype.calculateDiffModel = function (root) { var doc = {}; (function aux (node, parentNode) { var docNode = { 'extra' : node.mulwapp_create_spec, 'dependencies' : this.getDependencies(node.mulwapp_create_spec), 'props' : {}, 'children' : {} }; if (node instanceof THREE.Object3D) { var conf = this.config.shareConf(node, undefined, root); // Return if this object is not to be synchronized if (!conf) return; // If called by a parent, set the relation if (parentNode) { parentNode.children[node.mulwapp_guid] = true; } // Set properties in the doc node if (conf.watch_props) { for (var i = 0; i < conf.watch_props.length; i++) { var prop = conf.watch_props[i]; var val = prop.split('.').reduce(function (prev, step) { return prev[step]; }, node); docNode.props[prop] = val; } } // Recurse on children for (var i = 0; i < node.children.length; i++) { aux.call(this, node.children[i], docNode); } } // Recurse on dependencies from create spec for (var i = 0; i < docNode.dependencies.length; i++) { aux.call(this, this.allLocalObjects[docNode.dependencies[i]], undefined); } doc[node.mulwapp_guid] = docNode; }).call(this, root, undefined); return doc; } /** * Intercepts constructor calls to create a create specification before * creating the object. 
* @param {Mulwapp} mulwapp - A reference to a Mulwapp object * @param {Array} constructors - A list of constructors to intercept */ ThreeAdapter.prototype.setupConstructorInterceptors = function (mulwapp, constructors) { if (CONSTRUCTORS_DECORATED) throw 'Constructors have already been decorated'; CONSTRUCTORS_DECORATED = true; var _this = this; constructors.forEach(function (name) { var backupName = '_' + name; // Backup the original constructor somewhere THREE[backupName] = THREE[name]; // Override with your own, then call the original THREE[name] = function () { // Decorate constructor if (!this._mulwapp_remote_create) { if (mulwapp.applicationInitializationOngoing) { this.mulwapp_guid = 'guid' + _this.nextIncrementalGuid; _this.nextIncrementalGuid++; } var spec = _this.generateCreateSpec(name, this.mulwapp_guid, arguments); this.mulwapp_create_spec = spec; _this.allLocalObjects[this.mulwapp_guid] = this; } // Call original constructor THREE[backupName].apply(this, arguments); } // Extend the original class THREE[name].prototype = Object.create(THREE[backupName].prototype); }); } /** * Generate the specification that is used by remote peers to replay * object creation. * @param {string} name - The name of the object type * @param {string} guid - The mulwapp_guid of the object * @param {Array} argum - The arguments given to the local constructor */ ThreeAdapter.prototype.generateCreateSpec = function (name, guid, argum) { var args = []; // Argum is not an Array, but function parameters which is 'array like' for (var i = 0; i < argum.length; i++) { var arg = argum[i]; if ((typeof arg) == 'object' && arg.mulwapp_guid != undefined) { args.push({primitive: false, value: arg.mulwapp_guid}); } else { args.push({primitive: true, value: arg}); } } return {type: name, mulwapp_guid: guid, args: args} } /** * Constructs an object from a specification made by Mulwapp.generateCreateSpec * @param {Object} spec Specification needed to create the object. * @return {Object} The object created */ ThreeAdapter.prototype.constructorReplayer = function (spec) { function F(args)
F.prototype = THREE[spec.type].prototype; // Parse argument list var args = []; spec.args.forEach(function (e) { if (e.primitive) args.push(e.value); else args.push(this.lookupNodeByGuid(e.value)); }, this); // Create object var o = new F(args); o.mulwapp_guid = spec.mulwapp_guid; this.allLocalObjects[spec.mulwapp_guid] = o; return o; } /** * */ ThreeAdapter.prototype.modelUpdater = function (op) { var setProp = function (node, prop, val) { var propPath = prop.split('.'); propPath.slice(0, -1).forEach(function (step) { node = node[step]; }); node[propPath[propPath.length - 1]] = val; } var node = this.lookupNodeByGuid(op.guid); if (op.type == 'update prop') { setProp(node, op.key, op.val); } else if (op.type == 'insert child') { var child = this.lookupNodeByGuid(op.key); node.add(child); } else if (op.type == 'delete child') { var child = this.lookupNodeByGuid(op.key); node.remove(child); } else if (op.type == 'insert object') { this.constructorReplayer(op.val.extra); } else if (op.type == 'delete object') { delete this.allLocalObjects[op.guid]; } } /** * */ ThreeAdapter.prototype.lookupNodeByGuid = function (guid) { return this.allLocalObjects[guid]; } ThreeAdapter.prototype.getConstructors = function () { return [ // "REVISION", // "log", // "warn", // "error", // "MOUSE", // "CullFaceNone", // "CullFaceBack", // "CullFaceFront", // "CullFaceFrontBack", // "FrontFaceDirectionCW", // "FrontFaceDirectionCCW", // "BasicShadowMap", // "PCFShadowMap", // "PCFSoftShadowMap", // "FrontSide", // "BackSide", // "DoubleSide", // "NoShading", // "FlatShading", // "SmoothShading", // "NoColors", // "FaceColors", // "VertexColors", // "NoBlending", // "NormalBlending", // "AdditiveBlending", // "SubtractiveBlending", // "MultiplyBlending", // "CustomBlending", // "AddEquation", // "SubtractEquation", // "ReverseSubtractEquation", // "MinEquation", // "MaxEquation", // "ZeroFactor", // "OneFactor", // "SrcColorFactor", // "OneMinusSrcColorFactor", // "SrcAlphaFactor", // "OneMinusSrcAlphaFactor", // "DstAlphaFactor", // "OneMinusDstAlphaFactor", // "DstColorFactor", // "OneMinusDstColorFactor", // "SrcAlphaSaturateFactor", // "MultiplyOperation", // "MixOperation", // "AddOperation", // "UVMapping", // "CubeReflectionMapping", // "CubeRefractionMapping", // "EquirectangularReflectionMapping", // "EquirectangularRefractionMapping", // "SphericalReflectionMapping", // "RepeatWrapping", // "ClampToEdgeWrapping", // "MirroredRepeatWrapping", // "NearestFilter", // "NearestMipMapNearestFilter", // "NearestMipMapLinearFilter", // "LinearFilter", // "LinearMipMapNearestFilter", // "LinearMipMapLinearFilter", // "UnsignedByteType", // "ByteType", // "ShortType", // "UnsignedShortType", // "IntType", // "UnsignedIntType", // "FloatType", // "HalfFloatType", // "UnsignedShort4444Type", // "UnsignedShort5551Type", // "UnsignedShort565Type", // "AlphaFormat", // "RGBFormat", // "RGBAFormat", // "LuminanceFormat", // "LuminanceAlphaFormat", // "RGBEFormat", // "RGB_S3TC_DXT1_Format", // "RGBA_S3TC_DXT1_Format", // "RGBA_S3TC_DXT3_Format", // "RGBA_S3TC_DXT5_Format", // "RGB_PVRTC_4BPPV1_Format", // "RGB_PVRTC_2BPPV1_Format", // "RGBA_PVRTC_4BPPV1_Format", // "RGBA_PVRTC_2BPPV1_Format", // "Projector", // "CanvasRenderer", // "Color", // "ColorKeywords", // "Quaternion", // "Vector2", // "Vector3", // "Vector4", // "Euler", // "Line3", // "Box2", // "Box3", // "Matrix3", // "Matrix4", // "Ray", // "Sphere", // "Frustum", // "Plane", // "Math", // "Spline", // "Triangle", // "Clock", // "EventDispatcher", // 
"Raycaster", // "Object3D", // "Object3DIdCount", // "Face3", // "Face4", // "BufferAttribute", // "Int8Attribute", // "Uint8Attribute", // "Uint8ClampedAttribute", // "Int16Attribute", // "Uint16Attribute", // "Int32Attribute", // "Uint32Attribute", // "Float32Attribute", // "Float64Attribute", // "DynamicBufferAttribute", "BufferGeometry", // "Geometry", // "GeometryIdCount", // "Camera", // "CubeCamera", "OrthographicCamera", "PerspectiveCamera", // "Light", "AmbientLight", // "AreaLight", "DirectionalLight", // "HemisphereLight", "PointLight", // "SpotLight", // "Cache", // "Loader", // "XHRLoader", // "ImageLoader", // "JSONLoader", // "LoadingManager", // "DefaultLoadingManager", // "BufferGeometryLoader", // "MaterialLoader", // "ObjectLoader", // "TextureLoader", // "BinaryTextureLoader", // "DataTextureLoader", // "CompressedTextureLoader", // "Material", // "MaterialIdCount", // "LineBasicMaterial", // "LineDashedMaterial", "MeshBasicMaterial", "MeshLambertMaterial", "MeshPhongMaterial", // "MeshDepthMaterial", // "MeshNormalMaterial", // "MeshFaceMaterial", // "PointCloudMaterial", // "ParticleBasicMaterial", // "ParticleSystemMaterial", // "ShaderMaterial", // "RawShaderMaterial", // "SpriteMaterial", // "Texture", // "TextureIdCount", // "CubeTexture", // "CompressedTexture", // "DataTexture", // "VideoTexture", "Group", // "PointCloud", // "ParticleSystem", // "Line", // "LineStrip", // "LinePieces", "Mesh", // "Bone", // "Skeleton", // "SkinnedMesh", // "MorphAnimMesh", // "LOD", // "Sprite", // "Particle", // "LensFlare", "Scene", // "Fog", // "FogExp2", // "ShaderChunk", // "UniformsUtils", // "UniformsLib", // "ShaderLib", // "WebGLRenderer", // "WebGLRenderTarget", // "WebGLRenderTargetCube", // "WebGLExtensions", // "WebGLProgram", // "WebGLShader", // "WebGLState", // "LensFlarePlugin", // "ShadowMapPlugin", // "SpritePlugin", // "GeometryUtils", // "ImageUtils", // "SceneUtils", // "FontUtils", // "typeface_js", // "Audio", // "AudioListener", // "Curve", // "CurvePath", // "Gyroscope", // "Path", // "PathActions", // "Shape", // "LineCurve", // "QuadraticBezierCurve", // "CubicBezierCurve", // "SplineCurve", // "EllipseCurve", // "ArcCurve", // "LineCurve3", // "QuadraticBezierCurve3", // "CubicBezierCurve3", // "SplineCurve3", // "ClosedSplineCurve3", // "AnimationHandler", // "Animation", // "KeyFrameAnimation", // "MorphAnimation", "BoxGeometry", // "CircleGeometry", // "CubeGeometry", // "CylinderGeometry", // "ExtrudeGeometry", // "ShapeGeometry", // "LatheGeometry", "PlaneGeometry", "PlaneBufferGeometry", // "RingGeometry", "SphereGeometry", // "TextGeometry", // "TorusGeometry", // "TorusKnotGeometry", // "TubeGeometry", // "PolyhedronGeometry", // "DodecahedronGeometry", // "IcosahedronGeometry", // "OctahedronGeometry", // "TetrahedronGeometry", // "ParametricGeometry", // "AxisHelper", // "ArrowHelper", // "BoxHelper", // "BoundingBoxHelper", // "CameraHelper", // "DirectionalLightHelper", // "EdgesHelper", // "FaceNormalsHelper", // "GridHelper", // "HemisphereLightHelper", // "PointLightHelper", // "SkeletonHelper", // "SpotLightHelper", // "VertexNormalsHelper", // "VertexTangentsHelper", // "WireframeHelper", // "ImmediateRenderObject", // "MorphBlendMesh" ]; }
{ this._mulwapp_remote_create = true; this.mulwapp_create_spec = spec; return THREE[spec.type].apply(this, args); }
identifier_body
threejs_adapter.js
/** * A Library Adapter Layer for ThreeJS */ CONSTRUCTORS_DECORATED = false; GUID_PROPERTY_ADDED = false; /** * Constructor for the ThreeJS Library Adapter Layer. * @param {object} config - A configuration object. Must contain a 'shareConf' * function. */ ThreeAdapter = function (config) { this.config = config; this.allLocalObjects = {}; this.nextIncrementalGuid = 0; } /** * */ ThreeAdapter.prototype.initialize = function (mulwapp) { this.addGuidProperty(); this.setupConstructorInterceptors(mulwapp, this.getConstructors()); } /** * */ ThreeAdapter.prototype.addGuidProperty = function () { if (GUID_PROPERTY_ADDED) throw 'GUID property has already been added'; GUID_PROPERTY_ADDED = true; if ('mulwapp_guid' in THREE.Object3D.prototype) { return; } var propobj = { get: function () { if (this._mulwapp_guid == undefined) { this._mulwapp_guid = '' + Math.random(); } return this._mulwapp_guid; }, set: function (x) { this._mulwapp_guid = x; } }; Object.defineProperty(THREE.Object3D.prototype, 'mulwapp_guid', propobj); Object.defineProperty(THREE.Geometry.prototype, 'mulwapp_guid', propobj); Object.defineProperty(THREE.Material.prototype, 'mulwapp_guid', propobj); } /** * Fetches dependencies out of the create spec and returns them as a list * @param {object} a create spec * @return {Array} a list of object dependencies */ ThreeAdapter.prototype.getDependencies = function (createSpec) { var filter = function (arg) { return !arg.primitive; } var map = function (arg) { return arg.value; } return createSpec.args.filter(filter).map(map); } /** * Calculates the diff model * @param {THREE.Scene} root - The scene graph root * @returns The diff model of the current scene graph */ ThreeAdapter.prototype.calculateDiffModel = function (root) { var doc = {}; (function aux (node, parentNode) { var docNode = { 'extra' : node.mulwapp_create_spec, 'dependencies' : this.getDependencies(node.mulwapp_create_spec), 'props' : {}, 'children' : {} }; if (node instanceof THREE.Object3D) { var conf = this.config.shareConf(node, undefined, root); // Return if this object is not to be synchronized if (!conf) return; // If called by a parent, set the relation if (parentNode) { parentNode.children[node.mulwapp_guid] = true; } // Set properties in the doc node if (conf.watch_props) { for (var i = 0; i < conf.watch_props.length; i++) { var prop = conf.watch_props[i]; var val = prop.split('.').reduce(function (prev, step) { return prev[step]; }, node); docNode.props[prop] = val; } } // Recurse on children for (var i = 0; i < node.children.length; i++) { aux.call(this, node.children[i], docNode); } } // Recurse on dependencies from create spec for (var i = 0; i < docNode.dependencies.length; i++) { aux.call(this, this.allLocalObjects[docNode.dependencies[i]], undefined); } doc[node.mulwapp_guid] = docNode; }).call(this, root, undefined); return doc; } /** * Intercepts constructor calls to create a create specification before * creating the object. 
* @param {Mulwapp} mulwapp - A reference to a Mulwapp object * @param {Array} constructors - A list of constructors to intercept */ ThreeAdapter.prototype.setupConstructorInterceptors = function (mulwapp, constructors) { if (CONSTRUCTORS_DECORATED) throw 'Constructors have already been decorated'; CONSTRUCTORS_DECORATED = true; var _this = this; constructors.forEach(function (name) { var backupName = '_' + name; // Backup the original constructor somewhere THREE[backupName] = THREE[name]; // Override with your own, then call the original THREE[name] = function () { // Decorate constructor if (!this._mulwapp_remote_create) { if (mulwapp.applicationInitializationOngoing) { this.mulwapp_guid = 'guid' + _this.nextIncrementalGuid; _this.nextIncrementalGuid++; } var spec = _this.generateCreateSpec(name, this.mulwapp_guid, arguments); this.mulwapp_create_spec = spec; _this.allLocalObjects[this.mulwapp_guid] = this; } // Call original constructor THREE[backupName].apply(this, arguments); } // Extend the original class THREE[name].prototype = Object.create(THREE[backupName].prototype); }); } /** * Generate the specification that is used by remote peers to replay * object creation. * @param {string} name - The name of the object type * @param {string} guid - The mulwapp_guid of the object * @param {Array} argum - The arguments given to the local constructor */ ThreeAdapter.prototype.generateCreateSpec = function (name, guid, argum) { var args = []; // Argum is not an Array, but function parameters which is 'array like' for (var i = 0; i < argum.length; i++) { var arg = argum[i]; if ((typeof arg) == 'object' && arg.mulwapp_guid != undefined) { args.push({primitive: false, value: arg.mulwapp_guid}); } else { args.push({primitive: true, value: arg}); } } return {type: name, mulwapp_guid: guid, args: args} } /** * Constructs an object from a specification made by Mulwapp.generateCreateSpec * @param {Object} spec Specification needed to create the object. 
* @return {Object} The object created */ ThreeAdapter.prototype.constructorReplayer = function (spec) { function F(args) { this._mulwapp_remote_create = true; this.mulwapp_create_spec = spec; return THREE[spec.type].apply(this, args); } F.prototype = THREE[spec.type].prototype; // Parse argument list var args = []; spec.args.forEach(function (e) { if (e.primitive) args.push(e.value); else args.push(this.lookupNodeByGuid(e.value)); }, this); // Create object var o = new F(args); o.mulwapp_guid = spec.mulwapp_guid; this.allLocalObjects[spec.mulwapp_guid] = o; return o; } /** * */ ThreeAdapter.prototype.modelUpdater = function (op) { var setProp = function (node, prop, val) { var propPath = prop.split('.'); propPath.slice(0, -1).forEach(function (step) { node = node[step]; }); node[propPath[propPath.length - 1]] = val; } var node = this.lookupNodeByGuid(op.guid); if (op.type == 'update prop') { setProp(node, op.key, op.val); } else if (op.type == 'insert child') { var child = this.lookupNodeByGuid(op.key); node.add(child); } else if (op.type == 'delete child') { var child = this.lookupNodeByGuid(op.key); node.remove(child); } else if (op.type == 'insert object') { this.constructorReplayer(op.val.extra); } else if (op.type == 'delete object') { delete this.allLocalObjects[op.guid]; } } /** * */ ThreeAdapter.prototype.lookupNodeByGuid = function (guid) { return this.allLocalObjects[guid]; } ThreeAdapter.prototype.getConstructors = function () { return [ // "REVISION", // "log", // "warn", // "error", // "MOUSE", // "CullFaceNone", // "CullFaceBack", // "CullFaceFront", // "CullFaceFrontBack", // "FrontFaceDirectionCW", // "FrontFaceDirectionCCW", // "BasicShadowMap", // "PCFShadowMap", // "PCFSoftShadowMap", // "FrontSide", // "BackSide", // "DoubleSide", // "NoShading", // "FlatShading", // "SmoothShading", // "NoColors", // "FaceColors", // "VertexColors", // "NoBlending", // "NormalBlending", // "AdditiveBlending", // "SubtractiveBlending", // "MultiplyBlending", // "CustomBlending", // "AddEquation", // "SubtractEquation", // "ReverseSubtractEquation", // "MinEquation", // "MaxEquation", // "ZeroFactor", // "OneFactor", // "SrcColorFactor", // "OneMinusSrcColorFactor", // "SrcAlphaFactor", // "OneMinusSrcAlphaFactor", // "DstAlphaFactor", // "OneMinusDstAlphaFactor", // "DstColorFactor", // "OneMinusDstColorFactor", // "SrcAlphaSaturateFactor", // "MultiplyOperation", // "MixOperation", // "AddOperation", // "UVMapping", // "CubeReflectionMapping", // "CubeRefractionMapping", // "EquirectangularReflectionMapping", // "EquirectangularRefractionMapping", // "SphericalReflectionMapping", // "RepeatWrapping", // "ClampToEdgeWrapping", // "MirroredRepeatWrapping", // "NearestFilter", // "NearestMipMapNearestFilter", // "NearestMipMapLinearFilter", // "LinearFilter", // "LinearMipMapNearestFilter", // "LinearMipMapLinearFilter", // "UnsignedByteType", // "ByteType", // "ShortType", // "UnsignedShortType", // "IntType", // "UnsignedIntType", // "FloatType", // "HalfFloatType", // "UnsignedShort4444Type", // "UnsignedShort5551Type", // "UnsignedShort565Type", // "AlphaFormat", // "RGBFormat", // "RGBAFormat", // "LuminanceFormat", // "LuminanceAlphaFormat", // "RGBEFormat", // "RGB_S3TC_DXT1_Format", // "RGBA_S3TC_DXT1_Format", // "RGBA_S3TC_DXT3_Format", // "RGBA_S3TC_DXT5_Format", // "RGB_PVRTC_4BPPV1_Format", // "RGB_PVRTC_2BPPV1_Format", // "RGBA_PVRTC_4BPPV1_Format", // "RGBA_PVRTC_2BPPV1_Format", // "Projector", // "CanvasRenderer", // "Color", // "ColorKeywords", // "Quaternion", // 
"Vector2", // "Vector3", // "Vector4", // "Euler", // "Line3", // "Box2", // "Box3", // "Matrix3", // "Matrix4", // "Ray", // "Sphere", // "Frustum", // "Plane", // "Math", // "Spline", // "Triangle", // "Clock", // "EventDispatcher", // "Raycaster", // "Object3D", // "Object3DIdCount", // "Face3", // "Face4", // "BufferAttribute", // "Int8Attribute", // "Uint8Attribute", // "Uint8ClampedAttribute", // "Int16Attribute", // "Uint16Attribute", // "Int32Attribute", // "Uint32Attribute", // "Float32Attribute", // "Float64Attribute", // "DynamicBufferAttribute", "BufferGeometry", // "Geometry", // "GeometryIdCount", // "Camera", // "CubeCamera", "OrthographicCamera", "PerspectiveCamera", // "Light", "AmbientLight", // "AreaLight", "DirectionalLight",
// "XHRLoader", // "ImageLoader", // "JSONLoader", // "LoadingManager", // "DefaultLoadingManager", // "BufferGeometryLoader", // "MaterialLoader", // "ObjectLoader", // "TextureLoader", // "BinaryTextureLoader", // "DataTextureLoader", // "CompressedTextureLoader", // "Material", // "MaterialIdCount", // "LineBasicMaterial", // "LineDashedMaterial", "MeshBasicMaterial", "MeshLambertMaterial", "MeshPhongMaterial", // "MeshDepthMaterial", // "MeshNormalMaterial", // "MeshFaceMaterial", // "PointCloudMaterial", // "ParticleBasicMaterial", // "ParticleSystemMaterial", // "ShaderMaterial", // "RawShaderMaterial", // "SpriteMaterial", // "Texture", // "TextureIdCount", // "CubeTexture", // "CompressedTexture", // "DataTexture", // "VideoTexture", "Group", // "PointCloud", // "ParticleSystem", // "Line", // "LineStrip", // "LinePieces", "Mesh", // "Bone", // "Skeleton", // "SkinnedMesh", // "MorphAnimMesh", // "LOD", // "Sprite", // "Particle", // "LensFlare", "Scene", // "Fog", // "FogExp2", // "ShaderChunk", // "UniformsUtils", // "UniformsLib", // "ShaderLib", // "WebGLRenderer", // "WebGLRenderTarget", // "WebGLRenderTargetCube", // "WebGLExtensions", // "WebGLProgram", // "WebGLShader", // "WebGLState", // "LensFlarePlugin", // "ShadowMapPlugin", // "SpritePlugin", // "GeometryUtils", // "ImageUtils", // "SceneUtils", // "FontUtils", // "typeface_js", // "Audio", // "AudioListener", // "Curve", // "CurvePath", // "Gyroscope", // "Path", // "PathActions", // "Shape", // "LineCurve", // "QuadraticBezierCurve", // "CubicBezierCurve", // "SplineCurve", // "EllipseCurve", // "ArcCurve", // "LineCurve3", // "QuadraticBezierCurve3", // "CubicBezierCurve3", // "SplineCurve3", // "ClosedSplineCurve3", // "AnimationHandler", // "Animation", // "KeyFrameAnimation", // "MorphAnimation", "BoxGeometry", // "CircleGeometry", // "CubeGeometry", // "CylinderGeometry", // "ExtrudeGeometry", // "ShapeGeometry", // "LatheGeometry", "PlaneGeometry", "PlaneBufferGeometry", // "RingGeometry", "SphereGeometry", // "TextGeometry", // "TorusGeometry", // "TorusKnotGeometry", // "TubeGeometry", // "PolyhedronGeometry", // "DodecahedronGeometry", // "IcosahedronGeometry", // "OctahedronGeometry", // "TetrahedronGeometry", // "ParametricGeometry", // "AxisHelper", // "ArrowHelper", // "BoxHelper", // "BoundingBoxHelper", // "CameraHelper", // "DirectionalLightHelper", // "EdgesHelper", // "FaceNormalsHelper", // "GridHelper", // "HemisphereLightHelper", // "PointLightHelper", // "SkeletonHelper", // "SpotLightHelper", // "VertexNormalsHelper", // "VertexTangentsHelper", // "WireframeHelper", // "ImmediateRenderObject", // "MorphBlendMesh" ]; }
// "HemisphereLight", "PointLight", // "SpotLight", // "Cache", // "Loader",
random_line_split
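The row above also contains the interception step: every listed THREE constructor is backed up under a prefixed name and replaced by a wrapper that assigns a guid, records a create spec, and only then calls the original. The following is a rough Python analogue of that wrap-and-delegate pattern, with an invented `Mesh` class and counter; it illustrates the technique only and does not mirror the JavaScript prototype handling.

```python
import itertools

class Mesh:                       # hypothetical library class to observe
    def __init__(self, name):
        self.name = name

_guids = itertools.count()
created_specs = {}                # guid -> how the object was constructed

def intercept(cls):
    """Swap cls.__init__ for a wrapper that tags new instances with a guid
    and records the constructor arguments before delegating to the original."""
    original_init = cls.__init__

    def wrapped(self, *args, **kwargs):
        self.guid = 'guid%d' % next(_guids)
        created_specs[self.guid] = {'type': cls.__name__, 'args': args}
        original_init(self, *args, **kwargs)

    cls.__init__ = wrapped
    return cls

intercept(Mesh)
m = Mesh('floor')
print(m.guid, created_specs[m.guid])
```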
threejs_adapter.js
/** * A Library Adapter Layer for ThreeJS */ CONSTRUCTORS_DECORATED = false; GUID_PROPERTY_ADDED = false; /** * Constructor for the ThreeJS Library Adapter Layer. * @param {object} config - A configuration object. Must contain a 'shareConf' * function. */ ThreeAdapter = function (config) { this.config = config; this.allLocalObjects = {}; this.nextIncrementalGuid = 0; } /** * */ ThreeAdapter.prototype.initialize = function (mulwapp) { this.addGuidProperty(); this.setupConstructorInterceptors(mulwapp, this.getConstructors()); } /** * */ ThreeAdapter.prototype.addGuidProperty = function () { if (GUID_PROPERTY_ADDED) throw 'GUID property has already been added'; GUID_PROPERTY_ADDED = true; if ('mulwapp_guid' in THREE.Object3D.prototype) { return; } var propobj = { get: function () { if (this._mulwapp_guid == undefined) { this._mulwapp_guid = '' + Math.random(); } return this._mulwapp_guid; }, set: function (x) { this._mulwapp_guid = x; } }; Object.defineProperty(THREE.Object3D.prototype, 'mulwapp_guid', propobj); Object.defineProperty(THREE.Geometry.prototype, 'mulwapp_guid', propobj); Object.defineProperty(THREE.Material.prototype, 'mulwapp_guid', propobj); } /** * Fetches dependencies out of the create spec and returns them as a list * @param {object} a create spec * @return {Array} a list of object dependencies */ ThreeAdapter.prototype.getDependencies = function (createSpec) { var filter = function (arg) { return !arg.primitive; } var map = function (arg) { return arg.value; } return createSpec.args.filter(filter).map(map); } /** * Calculates the diff model * @param {THREE.Scene} root - The scene graph root * @returns The diff model of the current scene graph */ ThreeAdapter.prototype.calculateDiffModel = function (root) { var doc = {}; (function aux (node, parentNode) { var docNode = { 'extra' : node.mulwapp_create_spec, 'dependencies' : this.getDependencies(node.mulwapp_create_spec), 'props' : {}, 'children' : {} }; if (node instanceof THREE.Object3D) { var conf = this.config.shareConf(node, undefined, root); // Return if this object is not to be synchronized if (!conf) return; // If called by a parent, set the relation if (parentNode) { parentNode.children[node.mulwapp_guid] = true; } // Set properties in the doc node if (conf.watch_props) { for (var i = 0; i < conf.watch_props.length; i++) { var prop = conf.watch_props[i]; var val = prop.split('.').reduce(function (prev, step) { return prev[step]; }, node); docNode.props[prop] = val; } } // Recurse on children for (var i = 0; i < node.children.length; i++) { aux.call(this, node.children[i], docNode); } } // Recurse on dependencies from create spec for (var i = 0; i < docNode.dependencies.length; i++) { aux.call(this, this.allLocalObjects[docNode.dependencies[i]], undefined); } doc[node.mulwapp_guid] = docNode; }).call(this, root, undefined); return doc; } /** * Intercepts constructor calls to create a create specification before * creating the object. 
* @param {Mulwapp} mulwapp - A reference to a Mulwapp object * @param {Array} constructors - A list of constructors to intercept */ ThreeAdapter.prototype.setupConstructorInterceptors = function (mulwapp, constructors) { if (CONSTRUCTORS_DECORATED) throw 'Constructors have already been decorated'; CONSTRUCTORS_DECORATED = true; var _this = this; constructors.forEach(function (name) { var backupName = '_' + name; // Backup the original constructor somewhere THREE[backupName] = THREE[name]; // Override with your own, then call the original THREE[name] = function () { // Decorate constructor if (!this._mulwapp_remote_create) { if (mulwapp.applicationInitializationOngoing) { this.mulwapp_guid = 'guid' + _this.nextIncrementalGuid; _this.nextIncrementalGuid++; } var spec = _this.generateCreateSpec(name, this.mulwapp_guid, arguments); this.mulwapp_create_spec = spec; _this.allLocalObjects[this.mulwapp_guid] = this; } // Call original constructor THREE[backupName].apply(this, arguments); } // Extend the original class THREE[name].prototype = Object.create(THREE[backupName].prototype); }); } /** * Generate the specification that is used by remote peers to replay * object creation. * @param {string} name - The name of the object type * @param {string} guid - The mulwapp_guid of the object * @param {Array} argum - The arguments given to the local constructor */ ThreeAdapter.prototype.generateCreateSpec = function (name, guid, argum) { var args = []; // Argum is not an Array, but function parameters which is 'array like' for (var i = 0; i < argum.length; i++) { var arg = argum[i]; if ((typeof arg) == 'object' && arg.mulwapp_guid != undefined) { args.push({primitive: false, value: arg.mulwapp_guid}); } else { args.push({primitive: true, value: arg}); } } return {type: name, mulwapp_guid: guid, args: args} } /** * Constructs an object from a specification made by Mulwapp.generateCreateSpec * @param {Object} spec Specification needed to create the object. * @return {Object} The object created */ ThreeAdapter.prototype.constructorReplayer = function (spec) { function
(args) { this._mulwapp_remote_create = true; this.mulwapp_create_spec = spec; return THREE[spec.type].apply(this, args); } F.prototype = THREE[spec.type].prototype; // Parse argument list var args = []; spec.args.forEach(function (e) { if (e.primitive) args.push(e.value); else args.push(this.lookupNodeByGuid(e.value)); }, this); // Create object var o = new F(args); o.mulwapp_guid = spec.mulwapp_guid; this.allLocalObjects[spec.mulwapp_guid] = o; return o; } /** * */ ThreeAdapter.prototype.modelUpdater = function (op) { var setProp = function (node, prop, val) { var propPath = prop.split('.'); propPath.slice(0, -1).forEach(function (step) { node = node[step]; }); node[propPath[propPath.length - 1]] = val; } var node = this.lookupNodeByGuid(op.guid); if (op.type == 'update prop') { setProp(node, op.key, op.val); } else if (op.type == 'insert child') { var child = this.lookupNodeByGuid(op.key); node.add(child); } else if (op.type == 'delete child') { var child = this.lookupNodeByGuid(op.key); node.remove(child); } else if (op.type == 'insert object') { this.constructorReplayer(op.val.extra); } else if (op.type == 'delete object') { delete this.allLocalObjects[op.guid]; } } /** * */ ThreeAdapter.prototype.lookupNodeByGuid = function (guid) { return this.allLocalObjects[guid]; } ThreeAdapter.prototype.getConstructors = function () { return [ // "REVISION", // "log", // "warn", // "error", // "MOUSE", // "CullFaceNone", // "CullFaceBack", // "CullFaceFront", // "CullFaceFrontBack", // "FrontFaceDirectionCW", // "FrontFaceDirectionCCW", // "BasicShadowMap", // "PCFShadowMap", // "PCFSoftShadowMap", // "FrontSide", // "BackSide", // "DoubleSide", // "NoShading", // "FlatShading", // "SmoothShading", // "NoColors", // "FaceColors", // "VertexColors", // "NoBlending", // "NormalBlending", // "AdditiveBlending", // "SubtractiveBlending", // "MultiplyBlending", // "CustomBlending", // "AddEquation", // "SubtractEquation", // "ReverseSubtractEquation", // "MinEquation", // "MaxEquation", // "ZeroFactor", // "OneFactor", // "SrcColorFactor", // "OneMinusSrcColorFactor", // "SrcAlphaFactor", // "OneMinusSrcAlphaFactor", // "DstAlphaFactor", // "OneMinusDstAlphaFactor", // "DstColorFactor", // "OneMinusDstColorFactor", // "SrcAlphaSaturateFactor", // "MultiplyOperation", // "MixOperation", // "AddOperation", // "UVMapping", // "CubeReflectionMapping", // "CubeRefractionMapping", // "EquirectangularReflectionMapping", // "EquirectangularRefractionMapping", // "SphericalReflectionMapping", // "RepeatWrapping", // "ClampToEdgeWrapping", // "MirroredRepeatWrapping", // "NearestFilter", // "NearestMipMapNearestFilter", // "NearestMipMapLinearFilter", // "LinearFilter", // "LinearMipMapNearestFilter", // "LinearMipMapLinearFilter", // "UnsignedByteType", // "ByteType", // "ShortType", // "UnsignedShortType", // "IntType", // "UnsignedIntType", // "FloatType", // "HalfFloatType", // "UnsignedShort4444Type", // "UnsignedShort5551Type", // "UnsignedShort565Type", // "AlphaFormat", // "RGBFormat", // "RGBAFormat", // "LuminanceFormat", // "LuminanceAlphaFormat", // "RGBEFormat", // "RGB_S3TC_DXT1_Format", // "RGBA_S3TC_DXT1_Format", // "RGBA_S3TC_DXT3_Format", // "RGBA_S3TC_DXT5_Format", // "RGB_PVRTC_4BPPV1_Format", // "RGB_PVRTC_2BPPV1_Format", // "RGBA_PVRTC_4BPPV1_Format", // "RGBA_PVRTC_2BPPV1_Format", // "Projector", // "CanvasRenderer", // "Color", // "ColorKeywords", // "Quaternion", // "Vector2", // "Vector3", // "Vector4", // "Euler", // "Line3", // "Box2", // "Box3", // "Matrix3", // "Matrix4", // "Ray", 
// "Sphere", // "Frustum", // "Plane", // "Math", // "Spline", // "Triangle", // "Clock", // "EventDispatcher", // "Raycaster", // "Object3D", // "Object3DIdCount", // "Face3", // "Face4", // "BufferAttribute", // "Int8Attribute", // "Uint8Attribute", // "Uint8ClampedAttribute", // "Int16Attribute", // "Uint16Attribute", // "Int32Attribute", // "Uint32Attribute", // "Float32Attribute", // "Float64Attribute", // "DynamicBufferAttribute", "BufferGeometry", // "Geometry", // "GeometryIdCount", // "Camera", // "CubeCamera", "OrthographicCamera", "PerspectiveCamera", // "Light", "AmbientLight", // "AreaLight", "DirectionalLight", // "HemisphereLight", "PointLight", // "SpotLight", // "Cache", // "Loader", // "XHRLoader", // "ImageLoader", // "JSONLoader", // "LoadingManager", // "DefaultLoadingManager", // "BufferGeometryLoader", // "MaterialLoader", // "ObjectLoader", // "TextureLoader", // "BinaryTextureLoader", // "DataTextureLoader", // "CompressedTextureLoader", // "Material", // "MaterialIdCount", // "LineBasicMaterial", // "LineDashedMaterial", "MeshBasicMaterial", "MeshLambertMaterial", "MeshPhongMaterial", // "MeshDepthMaterial", // "MeshNormalMaterial", // "MeshFaceMaterial", // "PointCloudMaterial", // "ParticleBasicMaterial", // "ParticleSystemMaterial", // "ShaderMaterial", // "RawShaderMaterial", // "SpriteMaterial", // "Texture", // "TextureIdCount", // "CubeTexture", // "CompressedTexture", // "DataTexture", // "VideoTexture", "Group", // "PointCloud", // "ParticleSystem", // "Line", // "LineStrip", // "LinePieces", "Mesh", // "Bone", // "Skeleton", // "SkinnedMesh", // "MorphAnimMesh", // "LOD", // "Sprite", // "Particle", // "LensFlare", "Scene", // "Fog", // "FogExp2", // "ShaderChunk", // "UniformsUtils", // "UniformsLib", // "ShaderLib", // "WebGLRenderer", // "WebGLRenderTarget", // "WebGLRenderTargetCube", // "WebGLExtensions", // "WebGLProgram", // "WebGLShader", // "WebGLState", // "LensFlarePlugin", // "ShadowMapPlugin", // "SpritePlugin", // "GeometryUtils", // "ImageUtils", // "SceneUtils", // "FontUtils", // "typeface_js", // "Audio", // "AudioListener", // "Curve", // "CurvePath", // "Gyroscope", // "Path", // "PathActions", // "Shape", // "LineCurve", // "QuadraticBezierCurve", // "CubicBezierCurve", // "SplineCurve", // "EllipseCurve", // "ArcCurve", // "LineCurve3", // "QuadraticBezierCurve3", // "CubicBezierCurve3", // "SplineCurve3", // "ClosedSplineCurve3", // "AnimationHandler", // "Animation", // "KeyFrameAnimation", // "MorphAnimation", "BoxGeometry", // "CircleGeometry", // "CubeGeometry", // "CylinderGeometry", // "ExtrudeGeometry", // "ShapeGeometry", // "LatheGeometry", "PlaneGeometry", "PlaneBufferGeometry", // "RingGeometry", "SphereGeometry", // "TextGeometry", // "TorusGeometry", // "TorusKnotGeometry", // "TubeGeometry", // "PolyhedronGeometry", // "DodecahedronGeometry", // "IcosahedronGeometry", // "OctahedronGeometry", // "TetrahedronGeometry", // "ParametricGeometry", // "AxisHelper", // "ArrowHelper", // "BoxHelper", // "BoundingBoxHelper", // "CameraHelper", // "DirectionalLightHelper", // "EdgesHelper", // "FaceNormalsHelper", // "GridHelper", // "HemisphereLightHelper", // "PointLightHelper", // "SkeletonHelper", // "SpotLightHelper", // "VertexNormalsHelper", // "VertexTangentsHelper", // "WireframeHelper", // "ImmediateRenderObject", // "MorphBlendMesh" ]; }
F
identifier_name
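The prefix above ends just before the stand-in constructor's name; the surrounding code also shows how constructor arguments are normalised for the spec, with guid-carrying objects stored as references and everything else as plain values. A small illustrative Python version of that classification follows; the function and class names are invented, while the primitive/value entry format follows the spec shape visible in the file.

```python
def describe_args(args):
    """Spec entries for constructor arguments: objects carrying a guid become
    references, everything else is kept as a primitive value."""
    entries = []
    for arg in args:
        guid = getattr(arg, 'guid', None)
        if guid is not None:
            entries.append({'primitive': False, 'value': guid})
        else:
            entries.append({'primitive': True, 'value': arg})
    return entries

class Material:                   # hypothetical guid-carrying dependency
    def __init__(self, guid):
        self.guid = guid

print(describe_args([1.5, 'red', Material('guid7')]))
# [{'primitive': True, 'value': 1.5}, {'primitive': True, 'value': 'red'},
#  {'primitive': False, 'value': 'guid7'}]
```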
main.py
# $Filename$ # $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. # # #Redistribution and use in source and binary forms, with or without #modification, are permitted provided that the following conditions are #met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the German Aerospace Center nor the names of # its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Implements the main part of the property widget. """ from PyQt4 import QtGui, QtCore from datafinder.core.configuration.properties import constants from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory from datafinder.gui.user.models.properties import PropertiesModel from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget __version__ = "$Revision-Id:$" class PropertyWidget(QtGui.QWidget, Ui_propertyWidget): """ Implements the main part of the property widget. """ def __init__(self, parent): """ @see: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QWidget.__init__(self, parent) Ui_propertyWidget.__init__(self) self.setupUi(self) self._model = None self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot) self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot) self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot) self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot) self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot) self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot) def _propertyStateChangedSlot(self): """ Handles changes of properties of the model and updates the button enabled states in accordance to the selection. """ self._updateButtonStates() def _updateSlot(self, index): """ Slot is called when data of property entry has changed. @param index: The index of the selected index. @type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>} """ if index.isValid(): self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) def _selectionChangedSlot(self, _): """ Slot is called when the selected property entries changed. 
""" self._updateButtonStates() def _updateButtonStates(self): """ Updates the enabled state of the add, edit, clear, revert and delete buttons in accordance to the selected properties. """ indexes = self.propertiesTableView.selectionModel().selectedIndexes() self._setInitialButtonState() if not self._model.isReadOnly and len(indexes) > 0: canBeCleared = isDeletable = isRevertable = True for index in indexes: if index.isValid(): canBeCleared &= self._model.canBeCleared(index) isDeletable &= self._model.isDeleteable(index) isRevertable &= self._model.isRevertable(index) # Enable / disable buttons if len(indexes) == 1: self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable) self.clearValueButton.setEnabled(canBeCleared) self.deleteButton.setEnabled(isDeletable) self.revertButton.setEnabled(isRevertable) self.addButton.setEnabled(True) def _setInitialButtonState(self): """ Sets the initial button state. """ self.addButton.setEnabled(not self._model.isReadOnly) self.editButton.setEnabled(False) self.clearValueButton.setEnabled(False) self.deleteButton.setEnabled(False) self.revertButton.setEnabled(False) def _addClickedSlot(self): """ Slot is called when the add button is used. """ index = self._model.add() self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model self._editClickedSlot() def _editClickedSlot(self): """ Slot is called when the edit button is used. """ index = self.propertiesTableView.selectionModel().currentIndex() if index.isValid(): self.propertiesTableView.edit(index) def _clearValueClickedSlot(self): """ Slot is called when the set empty button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.clearValue(index) def _determinePropertyRows(self): """ Determines the indexes of the property rows selected by the user. """ selectedIndexes = list() rows = list() # used to check for / avoid multiple entries for index in self.propertiesTableView.selectionModel().selectedIndexes(): if not index.row() in rows: selectedIndexes.append(index) rows.append(index.row()) selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True) return selectedIndexes def _deleteClickedSlot(self): """ Slot is called when the delete button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.remove(index) def _revertClickedSlot(self): """ Slot is called when the revert button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.revert(index) def _refreshClickedSlot(self): """ Slot is called when the refresh button is used. """ if self._model.dirty: button = QtGui.QMessageBox.information(self, self.tr("Refresh information"), self.tr("All changes will be lost after the update.\n Do you want to continue?"), QtGui.QMessageBox.Yes|QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) if button == QtGui.QMessageBox.No: return self._model.refresh() self.propertiesTableView.setSortingEnabled(True) def _setModel(self, model): """ Sets the model. @param model: Model representing a set of properties. 
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>} """ self._model = model self.propertiesTableView.setModel(model) self._setInitialButtonState() column, order = self._model.sortProperties self.propertiesTableView.horizontalHeader().setSortIndicator(column, order) self.propertiesTableView.setSortingEnabled(True) propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE, constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE] self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self)) self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot) self.connect(self.propertiesTableView.selectionModel(), QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"), self._selectionChangedSlot) self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL), self._propertyStateChangedSlot) def _getModel(self):
def activateRefreshButton(self): """ Activates the refresh button. """ self.refreshButton.show() def deactivateRefreshButton(self): """ De-activates the refresh button. """ self.refreshButton.hide() model = property(_getModel, _setModel) class _PropertyItemDelegate(QtGui.QStyledItemDelegate): """ This item delegate has to choose the right editor for the expected property type and has to handle the conversion of the editor input to a proper model format. """ def __init__(self, propertyTypes, model, parent=None): """ Constructor. @param propertyTypes: Property types available for this property @type propertyTypes: C{list} of C{unicode} @param parent: Parent object of the delegate. @type parent: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QStyledItemDelegate.__init__(self, parent) self._factory = EditorFactory() self._propertyTypes = propertyTypes self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot ) self._currentEditedRow = -1 self._currentEditedColumn = -1 self._model = model def _handleEditorClosedSlot(self, _, hint): """ Handles the closing of editor to remove added property entries without property name. """ if hint == QtGui.QAbstractItemDelegate.RevertModelCache \ and self._currentEditedColumn == 0: index = self._model.index(self._currentEditedRow, self._currentEditedColumn) index.model().setData(index, QtCore.QVariant(None)) def createEditor(self, parent, _, index): """ @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>} """ self._currentEditedRow = index.row() self._currentEditedColumn = index.column() if index.column() == 0: editor = QtGui.QLineEdit(parent) editor.setValidator(_PropertyNameValidator(index.model().propertyNameValidationFunction, editor)) elif index.column() == 1: editor = QtGui.QComboBox(parent) editor.addItems(self._propertyTypes) valueType = index.model().getModelData(index.row(), 1) if valueType in self._propertyTypes: editor.setCurrentIndex(self._propertyTypes.index(valueType)) elif index.column() == 2: propType = index.model().getModelData(index.row(), 1) restriction = index.model().getModelData(index.row(), 4) pyValue = index.model().getModelData(index.row(), 2) editor = self._factory.createEditor(parent, propType, restriction, pyValue) return editor def setModelData(self, editor, model, index): """ @see: L{setModelData<PyQt4.QtGui.QItemDelegate.setModelData>} """ value = self._factory.getValueFromEditor(editor) if type(value) == list: variantList = list() for item in value: variantList.append(QtCore.QVariant(item)) variant = QtCore.QVariant.fromList(variantList) else: variant = QtCore.QVariant(value) model.setData(index, variant) def setEditorData(self, editor, index): """ L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>} """ pyData = index.model().getModelData(index.row(), index.column()) self._factory.setEditorValue(editor, pyData) class _PropertyNameValidator(QtGui.QValidator): """ Custom validator for property name checking. """ def __init__(self, validationFunction, parent=None): """ Constructor. @param validationFunction: Callable function which gets the property name as input and validates it. @type validationFunction: Callable C{object} """ QtGui.QValidator.__init__(self, parent) self._validationFunction = validationFunction def validate(self, inputString, position): """ Overwrites the default implementation. 
""" result = QtGui.QValidator.Invalid if self._validationFunction(unicode(inputString)) or len(inputString) == 0: result = QtGui.QValidator.Acceptable return (result, position)
""" Getter of the property model. """ return self._model
identifier_body
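The widget code above collapses a multi-column selection to one index per row and sorts the rows in descending order before removing them, so earlier deletions cannot shift the positions of the rows still to be processed (the original uses the Python 2 `cmp=` form of `sort`). A plain-Python sketch of that idea, without Qt and with invented names:

```python
def rows_to_delete(selected_rows):
    """One entry per row, highest row first, so deleting them one by one
    never invalidates the positions of the rows that remain."""
    return sorted(set(selected_rows), reverse=True)

items = ['a', 'b', 'c', 'd', 'e']
for row in rows_to_delete([1, 3, 1, 3, 4]):   # duplicates: one index per selected cell
    del items[row]
print(items)   # ['a', 'c']
```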
main.py
# $Filename$ # $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. # # #Redistribution and use in source and binary forms, with or without #modification, are permitted provided that the following conditions are #met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the German Aerospace Center nor the names of # its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Implements the main part of the property widget. """ from PyQt4 import QtGui, QtCore from datafinder.core.configuration.properties import constants from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory from datafinder.gui.user.models.properties import PropertiesModel from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget __version__ = "$Revision-Id:$" class PropertyWidget(QtGui.QWidget, Ui_propertyWidget): """ Implements the main part of the property widget. """ def __init__(self, parent): """ @see: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QWidget.__init__(self, parent) Ui_propertyWidget.__init__(self) self.setupUi(self) self._model = None self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot) self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot) self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot) self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot) self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot) self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot) def _propertyStateChangedSlot(self): """ Handles changes of properties of the model and updates the button enabled states in accordance to the selection. """ self._updateButtonStates() def _updateSlot(self, index): """ Slot is called when data of property entry has changed. @param index: The index of the selected index. @type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>} """ if index.isValid(): self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) def _selectionChangedSlot(self, _): """ Slot is called when the selected property entries changed. 
""" self._updateButtonStates() def _updateButtonStates(self): """ Updates the enabled state of the add, edit, clear, revert and delete buttons in accordance to the selected properties. """ indexes = self.propertiesTableView.selectionModel().selectedIndexes() self._setInitialButtonState() if not self._model.isReadOnly and len(indexes) > 0: canBeCleared = isDeletable = isRevertable = True for index in indexes: if index.isValid(): canBeCleared &= self._model.canBeCleared(index) isDeletable &= self._model.isDeleteable(index) isRevertable &= self._model.isRevertable(index) # Enable / disable buttons if len(indexes) == 1: self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable) self.clearValueButton.setEnabled(canBeCleared) self.deleteButton.setEnabled(isDeletable) self.revertButton.setEnabled(isRevertable) self.addButton.setEnabled(True) def _setInitialButtonState(self): """ Sets the initial button state. """ self.addButton.setEnabled(not self._model.isReadOnly) self.editButton.setEnabled(False) self.clearValueButton.setEnabled(False) self.deleteButton.setEnabled(False) self.revertButton.setEnabled(False) def _addClickedSlot(self): """ Slot is called when the add button is used. """ index = self._model.add() self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model self._editClickedSlot() def _editClickedSlot(self): """ Slot is called when the edit button is used. """ index = self.propertiesTableView.selectionModel().currentIndex() if index.isValid(): self.propertiesTableView.edit(index) def _clearValueClickedSlot(self): """ Slot is called when the set empty button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.clearValue(index) def _determinePropertyRows(self): """ Determines the indexes of the property rows selected by the user. """ selectedIndexes = list() rows = list() # used to check for / avoid multiple entries for index in self.propertiesTableView.selectionModel().selectedIndexes(): if not index.row() in rows: selectedIndexes.append(index) rows.append(index.row()) selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True) return selectedIndexes def _deleteClickedSlot(self): """ Slot is called when the delete button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.remove(index) def _revertClickedSlot(self): """ Slot is called when the revert button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.revert(index) def _refreshClickedSlot(self): """ Slot is called when the refresh button is used. """ if self._model.dirty: button = QtGui.QMessageBox.information(self, self.tr("Refresh information"), self.tr("All changes will be lost after the update.\n Do you want to continue?"), QtGui.QMessageBox.Yes|QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) if button == QtGui.QMessageBox.No: return self._model.refresh() self.propertiesTableView.setSortingEnabled(True) def _setModel(self, model): """ Sets the model. @param model: Model representing a set of properties. 
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>} """ self._model = model self.propertiesTableView.setModel(model) self._setInitialButtonState() column, order = self._model.sortProperties self.propertiesTableView.horizontalHeader().setSortIndicator(column, order) self.propertiesTableView.setSortingEnabled(True) propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE, constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE] self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self)) self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot) self.connect(self.propertiesTableView.selectionModel(), QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"), self._selectionChangedSlot) self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL), self._propertyStateChangedSlot) def _getModel(self): """ Getter of the property model. """ return self._model def activateRefreshButton(self): """ Activates the refresh button. """ self.refreshButton.show() def deactivateRefreshButton(self): """ De-activates the refresh button. """ self.refreshButton.hide() model = property(_getModel, _setModel) class _PropertyItemDelegate(QtGui.QStyledItemDelegate): """ This item delegate has to choose the right editor for the expected property type and has to handle the conversion of the editor input to a proper model format. """ def
(self, propertyTypes, model, parent=None): """ Constructor. @param propertyTypes: Property types available for this property @type propertyTypes: C{list} of C{unicode} @param parent: Parent object of the delegate. @type parent: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QStyledItemDelegate.__init__(self, parent) self._factory = EditorFactory() self._propertyTypes = propertyTypes self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot ) self._currentEditedRow = -1 self._currentEditedColumn = -1 self._model = model def _handleEditorClosedSlot(self, _, hint): """ Handles the closing of editor to remove added property entries without property name. """ if hint == QtGui.QAbstractItemDelegate.RevertModelCache \ and self._currentEditedColumn == 0: index = self._model.index(self._currentEditedRow, self._currentEditedColumn) index.model().setData(index, QtCore.QVariant(None)) def createEditor(self, parent, _, index): """ @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>} """ self._currentEditedRow = index.row() self._currentEditedColumn = index.column() if index.column() == 0: editor = QtGui.QLineEdit(parent) editor.setValidator(_PropertyNameValidator(index.model().propertyNameValidationFunction, editor)) elif index.column() == 1: editor = QtGui.QComboBox(parent) editor.addItems(self._propertyTypes) valueType = index.model().getModelData(index.row(), 1) if valueType in self._propertyTypes: editor.setCurrentIndex(self._propertyTypes.index(valueType)) elif index.column() == 2: propType = index.model().getModelData(index.row(), 1) restriction = index.model().getModelData(index.row(), 4) pyValue = index.model().getModelData(index.row(), 2) editor = self._factory.createEditor(parent, propType, restriction, pyValue) return editor def setModelData(self, editor, model, index): """ @see: L{setModelData<PyQt4.QtGui.QItemDelegate.setModelData>} """ value = self._factory.getValueFromEditor(editor) if type(value) == list: variantList = list() for item in value: variantList.append(QtCore.QVariant(item)) variant = QtCore.QVariant.fromList(variantList) else: variant = QtCore.QVariant(value) model.setData(index, variant) def setEditorData(self, editor, index): """ L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>} """ pyData = index.model().getModelData(index.row(), index.column()) self._factory.setEditorValue(editor, pyData) class _PropertyNameValidator(QtGui.QValidator): """ Custom validator for property name checking. """ def __init__(self, validationFunction, parent=None): """ Constructor. @param validationFunction: Callable function which gets the property name as input and validates it. @type validationFunction: Callable C{object} """ QtGui.QValidator.__init__(self, parent) self._validationFunction = validationFunction def validate(self, inputString, position): """ Overwrites the default implementation. """ result = QtGui.QValidator.Invalid if self._validationFunction(unicode(inputString)) or len(inputString) == 0: result = QtGui.QValidator.Acceptable return (result, position)
__init__
identifier_name
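The delegate whose `__init__` is held out above picks an editor widget per column: a line edit with a validator for the name, a combo box of type names for the type, and a factory-built editor for the value. The skeleton below shows only that per-column dispatch, assuming the same PyQt4 environment as the file; the column meanings and the fallback to the default editor are simplifications, and wiring it to a view is left out.

```python
from PyQt4 import QtGui

class MiniPropertyDelegate(QtGui.QStyledItemDelegate):
    """Illustrative per-column editor selection, not the file's delegate."""

    def __init__(self, type_names, parent=None):
        QtGui.QStyledItemDelegate.__init__(self, parent)
        self._type_names = type_names

    def createEditor(self, parent, option, index):
        if index.column() == 0:            # property name: free text
            return QtGui.QLineEdit(parent)
        if index.column() == 1:            # property type: fixed choice
            editor = QtGui.QComboBox(parent)
            editor.addItems(self._type_names)
            return editor
        # value column: fall back to the default editor for the data type
        return QtGui.QStyledItemDelegate.createEditor(self, parent, option, index)

# Typical use: view.setItemDelegate(MiniPropertyDelegate(['String', 'Number'], view))
```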
main.py
# $Filename$ # $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. # # #Redistribution and use in source and binary forms, with or without #modification, are permitted provided that the following conditions are #met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the German Aerospace Center nor the names of # its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Implements the main part of the property widget. """ from PyQt4 import QtGui, QtCore from datafinder.core.configuration.properties import constants from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory from datafinder.gui.user.models.properties import PropertiesModel from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget __version__ = "$Revision-Id:$" class PropertyWidget(QtGui.QWidget, Ui_propertyWidget): """ Implements the main part of the property widget. """ def __init__(self, parent): """ @see: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QWidget.__init__(self, parent) Ui_propertyWidget.__init__(self) self.setupUi(self) self._model = None self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot) self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot) self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot) self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot) self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot) self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot) def _propertyStateChangedSlot(self): """ Handles changes of properties of the model and updates the button enabled states in accordance to the selection. """ self._updateButtonStates() def _updateSlot(self, index): """ Slot is called when data of property entry has changed. @param index: The index of the selected index. @type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>} """ if index.isValid(): self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) def _selectionChangedSlot(self, _): """ Slot is called when the selected property entries changed. 
""" self._updateButtonStates() def _updateButtonStates(self): """ Updates the enabled state of the add, edit, clear, revert and delete buttons in accordance to the selected properties. """ indexes = self.propertiesTableView.selectionModel().selectedIndexes() self._setInitialButtonState() if not self._model.isReadOnly and len(indexes) > 0: canBeCleared = isDeletable = isRevertable = True for index in indexes: if index.isValid(): canBeCleared &= self._model.canBeCleared(index) isDeletable &= self._model.isDeleteable(index) isRevertable &= self._model.isRevertable(index) # Enable / disable buttons if len(indexes) == 1: self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable) self.clearValueButton.setEnabled(canBeCleared) self.deleteButton.setEnabled(isDeletable) self.revertButton.setEnabled(isRevertable) self.addButton.setEnabled(True) def _setInitialButtonState(self): """ Sets the initial button state. """ self.addButton.setEnabled(not self._model.isReadOnly) self.editButton.setEnabled(False) self.clearValueButton.setEnabled(False) self.deleteButton.setEnabled(False) self.revertButton.setEnabled(False) def _addClickedSlot(self): """ Slot is called when the add button is used. """ index = self._model.add() self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model self._editClickedSlot() def _editClickedSlot(self): """ Slot is called when the edit button is used. """ index = self.propertiesTableView.selectionModel().currentIndex() if index.isValid(): self.propertiesTableView.edit(index) def _clearValueClickedSlot(self): """ Slot is called when the set empty button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.clearValue(index) def _determinePropertyRows(self): """ Determines the indexes of the property rows selected by the user. """ selectedIndexes = list() rows = list() # used to check for / avoid multiple entries for index in self.propertiesTableView.selectionModel().selectedIndexes(): if not index.row() in rows: selectedIndexes.append(index) rows.append(index.row()) selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True) return selectedIndexes def _deleteClickedSlot(self): """ Slot is called when the delete button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.remove(index) def _revertClickedSlot(self): """ Slot is called when the revert button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.revert(index) def _refreshClickedSlot(self): """ Slot is called when the refresh button is used. """ if self._model.dirty: button = QtGui.QMessageBox.information(self, self.tr("Refresh information"), self.tr("All changes will be lost after the update.\n Do you want to continue?"), QtGui.QMessageBox.Yes|QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) if button == QtGui.QMessageBox.No: return self._model.refresh() self.propertiesTableView.setSortingEnabled(True) def _setModel(self, model): """ Sets the model. @param model: Model representing a set of properties. 
@type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>} """ self._model = model self.propertiesTableView.setModel(model) self._setInitialButtonState() column, order = self._model.sortProperties self.propertiesTableView.horizontalHeader().setSortIndicator(column, order) self.propertiesTableView.setSortingEnabled(True) propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE, constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE] self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self)) self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot) self.connect(self.propertiesTableView.selectionModel(), QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"), self._selectionChangedSlot) self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL), self._propertyStateChangedSlot) def _getModel(self): """ Getter of the property model. """ return self._model def activateRefreshButton(self): """ Activates the refresh button. """ self.refreshButton.show() def deactivateRefreshButton(self): """ De-activates the refresh button. """ self.refreshButton.hide() model = property(_getModel, _setModel) class _PropertyItemDelegate(QtGui.QStyledItemDelegate): """
and has to handle the conversion of the editor input to a proper model format. """ def __init__(self, propertyTypes, model, parent=None): """ Constructor. @param propertyTypes: Property types available for this property @type propertyTypes: C{list} of C{unicode} @param parent: Parent object of the delegate. @type parent: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QStyledItemDelegate.__init__(self, parent) self._factory = EditorFactory() self._propertyTypes = propertyTypes self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot ) self._currentEditedRow = -1 self._currentEditedColumn = -1 self._model = model def _handleEditorClosedSlot(self, _, hint): """ Handles the closing of editor to remove added property entries without property name. """ if hint == QtGui.QAbstractItemDelegate.RevertModelCache \ and self._currentEditedColumn == 0: index = self._model.index(self._currentEditedRow, self._currentEditedColumn) index.model().setData(index, QtCore.QVariant(None)) def createEditor(self, parent, _, index): """ @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>} """ self._currentEditedRow = index.row() self._currentEditedColumn = index.column() if index.column() == 0: editor = QtGui.QLineEdit(parent) editor.setValidator(_PropertyNameValidator(index.model().propertyNameValidationFunction, editor)) elif index.column() == 1: editor = QtGui.QComboBox(parent) editor.addItems(self._propertyTypes) valueType = index.model().getModelData(index.row(), 1) if valueType in self._propertyTypes: editor.setCurrentIndex(self._propertyTypes.index(valueType)) elif index.column() == 2: propType = index.model().getModelData(index.row(), 1) restriction = index.model().getModelData(index.row(), 4) pyValue = index.model().getModelData(index.row(), 2) editor = self._factory.createEditor(parent, propType, restriction, pyValue) return editor def setModelData(self, editor, model, index): """ @see: L{setModelData<PyQt4.QtGui.QItemDelegate.setModelData>} """ value = self._factory.getValueFromEditor(editor) if type(value) == list: variantList = list() for item in value: variantList.append(QtCore.QVariant(item)) variant = QtCore.QVariant.fromList(variantList) else: variant = QtCore.QVariant(value) model.setData(index, variant) def setEditorData(self, editor, index): """ L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>} """ pyData = index.model().getModelData(index.row(), index.column()) self._factory.setEditorValue(editor, pyData) class _PropertyNameValidator(QtGui.QValidator): """ Custom validator for property name checking. """ def __init__(self, validationFunction, parent=None): """ Constructor. @param validationFunction: Callable function which gets the property name as input and validates it. @type validationFunction: Callable C{object} """ QtGui.QValidator.__init__(self, parent) self._validationFunction = validationFunction def validate(self, inputString, position): """ Overwrites the default implementation. """ result = QtGui.QValidator.Invalid if self._validationFunction(unicode(inputString)) or len(inputString) == 0: result = QtGui.QValidator.Acceptable return (result, position)
This item delegate has to choose the right editor for the expected property type
random_line_split
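The same row also carries the custom name validator, whose `validate()` returns a (state, position) pair in the PyQt4 style, Acceptable when the supplied callable accepts the name or the field is empty. Below is a stripped-down sketch of that pattern with an invented rule (no whitespace) in place of the widget's real validation function, assuming the same Python 2 / PyQt4 environment as the file.

```python
from PyQt4 import QtGui

class NoWhitespaceValidator(QtGui.QValidator):
    """Illustrative only: accepts empty input or names without whitespace."""

    def validate(self, input_string, position):
        text = unicode(input_string)          # Python 2, as in the file
        if text == '' or not any(ch.isspace() for ch in text):
            return (QtGui.QValidator.Acceptable, position)
        return (QtGui.QValidator.Invalid, position)

# Typical use: line_edit.setValidator(NoWhitespaceValidator(line_edit))
```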
main.py
# $Filename$ # $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. # # #Redistribution and use in source and binary forms, with or without #modification, are permitted provided that the following conditions are #met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the German Aerospace Center nor the names of # its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Implements the main part of the property widget. """ from PyQt4 import QtGui, QtCore from datafinder.core.configuration.properties import constants from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory from datafinder.gui.user.models.properties import PropertiesModel from datafinder.gui.gen.widgets.property_widget_ui import Ui_propertyWidget __version__ = "$Revision-Id:$" class PropertyWidget(QtGui.QWidget, Ui_propertyWidget): """ Implements the main part of the property widget. """ def __init__(self, parent): """ @see: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QWidget.__init__(self, parent) Ui_propertyWidget.__init__(self) self.setupUi(self) self._model = None self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self._addClickedSlot) self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editClickedSlot) self.connect(self.clearValueButton, QtCore.SIGNAL("clicked()"), self._clearValueClickedSlot) self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self._deleteClickedSlot) self.connect(self.revertButton, QtCore.SIGNAL("clicked()"), self._revertClickedSlot) self.connect(self.refreshButton, QtCore.SIGNAL("clicked()"), self._refreshClickedSlot) def _propertyStateChangedSlot(self): """ Handles changes of properties of the model and updates the button enabled states in accordance to the selection. """ self._updateButtonStates() def _updateSlot(self, index): """ Slot is called when data of property entry has changed. @param index: The index of the selected index. @type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>} """ if index.isValid(): self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) def _selectionChangedSlot(self, _): """ Slot is called when the selected property entries changed. 
""" self._updateButtonStates() def _updateButtonStates(self): """ Updates the enabled state of the add, edit, clear, revert and delete buttons in accordance to the selected properties. """ indexes = self.propertiesTableView.selectionModel().selectedIndexes() self._setInitialButtonState() if not self._model.isReadOnly and len(indexes) > 0: canBeCleared = isDeletable = isRevertable = True for index in indexes: if index.isValid(): canBeCleared &= self._model.canBeCleared(index) isDeletable &= self._model.isDeleteable(index) isRevertable &= self._model.isRevertable(index) # Enable / disable buttons if len(indexes) == 1: self.editButton.setEnabled(self._model.flags(indexes[0]) & QtCore.Qt.ItemIsEditable) self.clearValueButton.setEnabled(canBeCleared) self.deleteButton.setEnabled(isDeletable) self.revertButton.setEnabled(isRevertable) self.addButton.setEnabled(True) def _setInitialButtonState(self): """ Sets the initial button state. """ self.addButton.setEnabled(not self._model.isReadOnly) self.editButton.setEnabled(False) self.clearValueButton.setEnabled(False) self.deleteButton.setEnabled(False) self.revertButton.setEnabled(False) def _addClickedSlot(self): """ Slot is called when the add button is used. """ index = self._model.add() self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect) self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model self._editClickedSlot() def _editClickedSlot(self): """ Slot is called when the edit button is used. """ index = self.propertiesTableView.selectionModel().currentIndex() if index.isValid(): self.propertiesTableView.edit(index) def _clearValueClickedSlot(self): """ Slot is called when the set empty button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.clearValue(index) def _determinePropertyRows(self): """ Determines the indexes of the property rows selected by the user. """ selectedIndexes = list() rows = list() # used to check for / avoid multiple entries for index in self.propertiesTableView.selectionModel().selectedIndexes(): if not index.row() in rows: selectedIndexes.append(index) rows.append(index.row()) selectedIndexes.sort(cmp=lambda x, y: cmp(x.row(), y.row()), reverse=True) return selectedIndexes def _deleteClickedSlot(self): """ Slot is called when the delete button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes: if index.isValid(): self._model.remove(index) def _revertClickedSlot(self): """ Slot is called when the revert button is used. """ selectedIndexes = self._determinePropertyRows() for index in selectedIndexes:
def _refreshClickedSlot(self): """ Slot is called when the refresh button is used. """ if self._model.dirty: button = QtGui.QMessageBox.information(self, self.tr("Refresh information"), self.tr("All changes will be lost after the update.\n Do you want to continue?"), QtGui.QMessageBox.Yes|QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) if button == QtGui.QMessageBox.No: return self._model.refresh() self.propertiesTableView.setSortingEnabled(True) def _setModel(self, model): """ Sets the model. @param model: Model representing a set of properties. @type model: L{PropertiesModel<datafinder.gui.user.models.properties.PropertiesModel>} """ self._model = model self.propertiesTableView.setModel(model) self._setInitialButtonState() column, order = self._model.sortProperties self.propertiesTableView.horizontalHeader().setSortIndicator(column, order) self.propertiesTableView.setSortingEnabled(True) propertyTypeNames = [constants.STRING_TYPE, constants.DATETIME_TYPE, constants.NUMBER_TYPE, constants.BOOLEAN_TYPE, constants.LIST_TYPE] self.propertiesTableView.setItemDelegate(_PropertyItemDelegate(propertyTypeNames, model, self)) self.connect(self._model, QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self._updateSlot) self.connect(self.propertiesTableView.selectionModel(), QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"), self._selectionChangedSlot) self.connect(self._model, QtCore.SIGNAL(PropertiesModel.PROPERTY_STATE_CHANGED_SIGNAL), self._propertyStateChangedSlot) def _getModel(self): """ Getter of the property model. """ return self._model def activateRefreshButton(self): """ Activates the refresh button. """ self.refreshButton.show() def deactivateRefreshButton(self): """ De-activates the refresh button. """ self.refreshButton.hide() model = property(_getModel, _setModel) class _PropertyItemDelegate(QtGui.QStyledItemDelegate): """ This item delegate has to choose the right editor for the expected property type and has to handle the conversion of the editor input to a proper model format. """ def __init__(self, propertyTypes, model, parent=None): """ Constructor. @param propertyTypes: Property types available for this property @type propertyTypes: C{list} of C{unicode} @param parent: Parent object of the delegate. @type parent: L{QWidget<PyQt4.QtGui.QWidget>} """ QtGui.QStyledItemDelegate.__init__(self, parent) self._factory = EditorFactory() self._propertyTypes = propertyTypes self.connect(self, QtCore.SIGNAL("closeEditor(QWidget *, QAbstractItemDelegate::EndEditHint)"), self._handleEditorClosedSlot ) self._currentEditedRow = -1 self._currentEditedColumn = -1 self._model = model def _handleEditorClosedSlot(self, _, hint): """ Handles the closing of editor to remove added property entries without property name. 
""" if hint == QtGui.QAbstractItemDelegate.RevertModelCache \ and self._currentEditedColumn == 0: index = self._model.index(self._currentEditedRow, self._currentEditedColumn) index.model().setData(index, QtCore.QVariant(None)) def createEditor(self, parent, _, index): """ @see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>} """ self._currentEditedRow = index.row() self._currentEditedColumn = index.column() if index.column() == 0: editor = QtGui.QLineEdit(parent) editor.setValidator(_PropertyNameValidator(index.model().propertyNameValidationFunction, editor)) elif index.column() == 1: editor = QtGui.QComboBox(parent) editor.addItems(self._propertyTypes) valueType = index.model().getModelData(index.row(), 1) if valueType in self._propertyTypes: editor.setCurrentIndex(self._propertyTypes.index(valueType)) elif index.column() == 2: propType = index.model().getModelData(index.row(), 1) restriction = index.model().getModelData(index.row(), 4) pyValue = index.model().getModelData(index.row(), 2) editor = self._factory.createEditor(parent, propType, restriction, pyValue) return editor def setModelData(self, editor, model, index): """ @see: L{setModelData<PyQt4.QtGui.QItemDelegate.setModelData>} """ value = self._factory.getValueFromEditor(editor) if type(value) == list: variantList = list() for item in value: variantList.append(QtCore.QVariant(item)) variant = QtCore.QVariant.fromList(variantList) else: variant = QtCore.QVariant(value) model.setData(index, variant) def setEditorData(self, editor, index): """ L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>} """ pyData = index.model().getModelData(index.row(), index.column()) self._factory.setEditorValue(editor, pyData) class _PropertyNameValidator(QtGui.QValidator): """ Custom validator for property name checking. """ def __init__(self, validationFunction, parent=None): """ Constructor. @param validationFunction: Callable function which gets the property name as input and validates it. @type validationFunction: Callable C{object} """ QtGui.QValidator.__init__(self, parent) self._validationFunction = validationFunction def validate(self, inputString, position): """ Overwrites the default implementation. """ result = QtGui.QValidator.Invalid if self._validationFunction(unicode(inputString)) or len(inputString) == 0: result = QtGui.QValidator.Acceptable return (result, position)
            if index.isValid():
                self._model.revert(index)
conditional_block
local.go
package local import ( "archive/tar" "context" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "sync" "time" "github.com/buildpacks/imgutil/layer" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/pkg/errors" "github.com/buildpacks/imgutil" ) type Image struct { docker client.CommonAPIClient repoName string inspect types.ImageInspect layerPaths []string prevImage *Image // reused layers will be fetched from prevImage downloadBaseOnce *sync.Once } type ImageOption func(*options) error type options struct { platform imgutil.Platform baseImageRepoName string prevImageRepoName string } //WithPreviousImage loads an existing image as a source for reusable layers. //Use with ReuseLayer(). //Ignored if image is not found. func WithPreviousImage(imageName string) ImageOption { return func(i *options) error { i.prevImageRepoName = imageName return nil } } //FromBaseImage loads an existing image as the config and layers for the new image. //Ignored if image is not found. func FromBaseImage(imageName string) ImageOption { return func(i *options) error { i.baseImageRepoName = imageName return nil } } //WithDefaultPlatform provides Architecture/OS/OSVersion defaults for the new image. //Defaults for a new image are ignored when FromBaseImage returns an image. func WithDefaultPlatform(platform imgutil.Platform) ImageOption { return func(i *options) error { i.platform = platform return nil } } //NewImage returns a new Image that can be modified and saved to a registry. func NewImage(repoName string, dockerClient client.CommonAPIClient, ops ...ImageOption) (*Image, error) { imageOpts := &options{} for _, op := range ops { if err := op(imageOpts); err != nil { return nil, err } } platform, err := defaultPlatform(dockerClient) if err != nil { return nil, err } if (imageOpts.platform != imgutil.Platform{}) { if err := validatePlatformOption(platform, imageOpts.platform); err != nil { return nil, err } platform = imageOpts.platform } inspect := defaultInspect(platform) image := &Image{ docker: dockerClient, repoName: repoName, inspect: inspect, layerPaths: make([]string, len(inspect.RootFS.Layers)), downloadBaseOnce: &sync.Once{}, } if imageOpts.prevImageRepoName != "" { if err := processPreviousImageOption(image, imageOpts.prevImageRepoName, platform, dockerClient); err != nil { return nil, err } } if imageOpts.baseImageRepoName != "" { if err := processBaseImageOption(image, imageOpts.baseImageRepoName, platform, dockerClient); err != nil { return nil, err } } if image.inspect.Os == "windows" { if err := prepareNewWindowsImage(image); err != nil { return nil, err } } return image, nil } func validatePlatformOption(defaultPlatform imgutil.Platform, optionPlatform imgutil.Platform) error { if optionPlatform.OS != "" && optionPlatform.OS != defaultPlatform.OS { return fmt.Errorf("invalid os: platform os %q must match the daemon os %q", optionPlatform.OS, defaultPlatform.OS) } return nil } func processPreviousImageOption(image *Image, prevImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { if _, err := inspectOptionalImage(dockerClient, prevImageRepoName, platform); err != nil { return err } prevImage, err := NewImage(prevImageRepoName, dockerClient, FromBaseImage(prevImageRepoName)) if err != nil { 
return errors.Wrapf(err, "getting previous image %q", prevImageRepoName) } image.prevImage = prevImage return nil } func processBaseImageOption(image *Image, baseImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { inspect, err := inspectOptionalImage(dockerClient, baseImageRepoName, platform) if err != nil { return err } image.inspect = inspect image.layerPaths = make([]string, len(image.inspect.RootFS.Layers)) return nil } func prepareNewWindowsImage(image *Image) error { // only append base layer to empty image if len(image.inspect.RootFS.Layers) > 0 { return nil } layerReader, err := layer.WindowsBaseLayer() if err != nil { return err } layerFile, err := ioutil.TempFile("", "imgutil.local.image.windowsbaselayer") if err != nil { return errors.Wrap(err, "creating temp file") } defer layerFile.Close() hasher := sha256.New() multiWriter := io.MultiWriter(layerFile, hasher) if _, err := io.Copy(multiWriter, layerReader); err != nil { return errors.Wrap(err, "copying base layer") } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(nil)) if err := image.AddLayerWithDiffID(layerFile.Name(), diffID); err != nil { return errors.Wrap(err, "adding base layer to image") } return nil } func (i *Image) Label(key string) (string, error) { labels := i.inspect.Config.Labels return labels[key], nil } func (i *Image) Labels() (map[string]string, error) { copiedLabels := make(map[string]string) for i, l := range i.inspect.Config.Labels { copiedLabels[i] = l } return copiedLabels, nil } func (i *Image) Env(key string) (string, error) { for _, envVar := range i.inspect.Config.Env { parts := strings.Split(envVar, "=") if parts[0] == key { return parts[1], nil } } return "", nil } func (i *Image) Entrypoint() ([]string, error) { return i.inspect.Config.Entrypoint, nil } func (i *Image) OS() (string, error) { return i.inspect.Os, nil } func (i *Image) OSVersion() (string, error) { return i.inspect.OsVersion, nil } func (i *Image) Architecture() (string, error) { return i.inspect.Architecture, nil } func (i *Image) Rename(name string) { i.repoName = name } func (i *Image) Name() string { return i.repoName } func (i *Image) Found() bool
func (i *Image) Identifier() (imgutil.Identifier, error) { return IDIdentifier{ ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"), }, nil } func (i *Image) CreatedAt() (time.Time, error) { createdAtTime := i.inspect.Created createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime) if err != nil { return time.Time{}, err } return createdTime, nil } func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error { ctx := context.Background() // FIND TOP LAYER var keepLayersIdx int for idx, diffID := range i.inspect.RootFS.Layers { if diffID == baseTopLayer { keepLayersIdx = idx + 1 break } } if keepLayersIdx == 0 { return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName) } // DOWNLOAD IMAGE if err := i.downloadBaseLayersOnce(); err != nil { return err } // SWITCH BASE LAYERS newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name()) if err != nil { return errors.Wrapf(err, "read config for new base image %q", newBase) } i.inspect.ID = newBaseInspect.ID i.downloadBaseOnce = &sync.Once{} i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...) i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...) return nil } func (i *Image) SetLabel(key, val string) error { if i.inspect.Config.Labels == nil { i.inspect.Config.Labels = map[string]string{} } i.inspect.Config.Labels[key] = val return nil } func (i *Image) SetOS(osVal string) error { if osVal != i.inspect.Os { return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os) } return nil } func (i *Image) SetOSVersion(osVersion string) error { i.inspect.OsVersion = osVersion return nil } func (i *Image) SetArchitecture(architecture string) error { i.inspect.Architecture = architecture return nil } func (i *Image) RemoveLabel(key string) error { delete(i.inspect.Config.Labels, key) return nil } func (i *Image) SetEnv(key, val string) error { ignoreCase := i.inspect.Os == "windows" for idx, kv := range i.inspect.Config.Env { parts := strings.SplitN(kv, "=", 2) foundKey := parts[0] searchKey := key if ignoreCase { foundKey = strings.ToUpper(foundKey) searchKey = strings.ToUpper(searchKey) } if foundKey == searchKey { i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val) return nil } } i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val)) return nil } func (i *Image) SetWorkingDir(dir string) error { i.inspect.Config.WorkingDir = dir return nil } func (i *Image) SetEntrypoint(ep ...string) error { i.inspect.Config.Entrypoint = ep return nil } func (i *Image) SetCmd(cmd ...string) error { i.inspect.Config.Cmd = cmd return nil } func (i *Image) TopLayer() (string, error) { all := i.inspect.RootFS.Layers if len(all) == 0 { return "", fmt.Errorf("image %q has no layers", i.repoName) } topLayer := all[len(all)-1] return topLayer, nil } func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) { for l := range i.inspect.RootFS.Layers { if i.inspect.RootFS.Layers[l] != diffID { continue } if i.layerPaths[l] == "" { if err := i.downloadBaseLayersOnce(); err != nil { return nil, err } if i.layerPaths[l] == "" { return nil, fmt.Errorf("fetching layer %q from daemon", diffID) } } return os.Open(i.layerPaths[l]) } return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID) } func (i *Image) AddLayer(path string) error { f, err := os.Open(filepath.Clean(path)) if err != nil { return errors.Wrapf(err, "AddLayer: open layer: 
%s", path) } defer f.Close() hasher := sha256.New() if _, err := io.Copy(hasher, f); err != nil { return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path) } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))) return i.AddLayerWithDiffID(path, diffID) } func (i *Image) AddLayerWithDiffID(path, diffID string) error { i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID) i.layerPaths = append(i.layerPaths, path) return nil } func (i *Image) ReuseLayer(diffID string) error { if i.prevImage == nil { return errors.New("failed to reuse layer because no previous image was provided") } if !i.prevImage.Found() { return fmt.Errorf("failed to reuse layer because previous image %q was not found in daemon", i.prevImage.repoName) } if err := i.prevImage.downloadBaseLayersOnce(); err != nil { return err } for l := range i.prevImage.inspect.RootFS.Layers { if i.prevImage.inspect.RootFS.Layers[l] == diffID { return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID) } } return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name()) } func (i *Image) Save(additionalNames ...string) error { // during the first save attempt some layers may be excluded. The docker daemon allows this if the given set // of layers already exists in the daemon in the given order inspect, err := i.doSave() if err != nil { // populate all layer paths and try again without the above performance optimization. if err := i.downloadBaseLayersOnce(); err != nil { return err } inspect, err = i.doSave() if err != nil { saveErr := imgutil.SaveError{} for _, n := range append([]string{i.Name()}, additionalNames...) { saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } return saveErr } } i.inspect = inspect var errs []imgutil.SaveDiagnostic for _, n := range append([]string{i.Name()}, additionalNames...) 
{ if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil { errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } } if len(errs) > 0 { return imgutil.SaveError{Errors: errs} } return nil } func (i *Image) doSave() (types.ImageInspect, error) { ctx := context.Background() done := make(chan error) t, err := name.NewTag(i.repoName, name.WeakValidation) if err != nil { return types.ImageInspect{}, err } // returns valid 'name:tag' appending 'latest', if missing tag repoName := t.Name() pr, pw := io.Pipe() defer pw.Close() go func() { res, err := i.docker.ImageLoad(ctx, pr, true) if err != nil { done <- err return } // only return response error after response is drained and closed responseErr := checkResponseError(res.Body) drainCloseErr := ensureReaderClosed(res.Body) if responseErr != nil { done <- responseErr return } if drainCloseErr != nil { done <- drainCloseErr } done <- nil }() tw := tar.NewWriter(pw) defer tw.Close() configFile, err := i.newConfigFile() if err != nil { return types.ImageInspect{}, errors.Wrap(err, "generating config file") } id := fmt.Sprintf("%x", sha256.Sum256(configFile)) if err := addTextToTar(tw, id+".json", configFile); err != nil { return types.ImageInspect{}, err } var blankIdx int var layerPaths []string for _, path := range i.layerPaths { if path == "" { layerName := fmt.Sprintf("blank_%d", blankIdx) blankIdx++ hdr := &tar.Header{Name: layerName, Mode: 0644, Size: 0} if err := tw.WriteHeader(hdr); err != nil { return types.ImageInspect{}, err } layerPaths = append(layerPaths, layerName) } else { layerName := fmt.Sprintf("/%x.tar", sha256.Sum256([]byte(path))) f, err := os.Open(filepath.Clean(path)) if err != nil { return types.ImageInspect{}, err } defer f.Close() if err := addFileToTar(tw, layerName, f); err != nil { return types.ImageInspect{}, err } f.Close() layerPaths = append(layerPaths, layerName) } } manifest, err := json.Marshal([]map[string]interface{}{ { "Config": id + ".json", "RepoTags": []string{repoName}, "Layers": layerPaths, }, }) if err != nil { return types.ImageInspect{}, err } if err := addTextToTar(tw, "manifest.json", manifest); err != nil { return types.ImageInspect{}, err } tw.Close() pw.Close() err = <-done if err != nil { return types.ImageInspect{}, errors.Wrapf(err, "loading image %q. first error", i.repoName) } inspect, _, err := i.docker.ImageInspectWithRaw(context.Background(), id) if err != nil { if client.IsErrNotFound(err) { return types.ImageInspect{}, errors.Wrapf(err, "saving image %q", i.repoName) } return types.ImageInspect{}, err } return inspect, nil } func (i *Image) newConfigFile() ([]byte, error) { cfg, err := v1Config(i.inspect) if err != nil { return nil, err } return json.Marshal(cfg) } func (i *Image) Delete() error { if !i.Found() { return nil } options := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := i.docker.ImageRemove(context.Background(), i.inspect.ID, options) return err } func (i *Image) ManifestSize() (int64, error) { return 0, nil } // downloadBaseLayersOnce exports the base image from the daemon and populates layerPaths the first time it is called. // subsequent calls do nothing. 
func (i *Image) downloadBaseLayersOnce() error { var err error if !i.Found() { return nil } i.downloadBaseOnce.Do(func() { err = i.downloadBaseLayers() }) if err != nil { return errors.Wrap(err, "fetching base layers") } return err } func (i *Image) downloadBaseLayers() error { ctx := context.Background() imageReader, err := i.docker.ImageSave(ctx, []string{i.inspect.ID}) if err != nil { return errors.Wrapf(err, "saving base image with ID %q from the docker daemon", i.inspect.ID) } defer ensureReaderClosed(imageReader) tmpDir, err := ioutil.TempDir("", "imgutil.local.image.") if err != nil { return errors.Wrap(err, "failed to create temp dir") } err = untar(imageReader, tmpDir) if err != nil { return err } mf, err := os.Open(filepath.Clean(filepath.Join(tmpDir, "manifest.json"))) if err != nil { return err } defer mf.Close() var manifest []struct { Config string Layers []string } if err := json.NewDecoder(mf).Decode(&manifest); err != nil { return err } if len(manifest) != 1 { return fmt.Errorf("manifest.json had unexpected number of entries: %d", len(manifest)) } df, err := os.Open(filepath.Clean(filepath.Join(tmpDir, manifest[0].Config))) if err != nil { return err } defer df.Close() var details struct { RootFS struct { DiffIDs []string `json:"diff_ids"` } `json:"rootfs"` } if err = json.NewDecoder(df).Decode(&details); err != nil { return err } for l := range details.RootFS.DiffIDs { i.layerPaths[l] = filepath.Join(tmpDir, manifest[0].Layers[l]) } for l := range i.layerPaths { if i.layerPaths[l] == "" { return errors.New("failed to download all base layers from daemon") } } return nil } func addTextToTar(tw *tar.Writer, name string, contents []byte) error { hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(contents))} if err := tw.WriteHeader(hdr); err != nil { return err } _, err := tw.Write(contents) return err } func addFileToTar(tw *tar.Writer, name string, contents *os.File) error { fi, err := contents.Stat() if err != nil { return err } hdr := &tar.Header{Name: name, Mode: 0644, Size: fi.Size()} if err := tw.WriteHeader(hdr); err != nil { return err } _, err = io.Copy(tw, contents) return err } func untar(r io.Reader, dest string) error { tr := tar.NewReader(r) for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive return nil } if err != nil { return err } path := filepath.Join(dest, hdr.Name) switch hdr.Typeflag { case tar.TypeDir: if err := os.MkdirAll(path, hdr.FileInfo().Mode()); err != nil { return err } case tar.TypeReg, tar.TypeRegA: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } fh, err := os.OpenFile(filepath.Clean(path), os.O_CREATE|os.O_WRONLY, hdr.FileInfo().Mode()) if err != nil { return err } if _, err := io.Copy(fh, tr); err != nil { fh.Close() return err } fh.Close() case tar.TypeSymlink: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } default: return fmt.Errorf("unknown file type in tar %d", hdr.Typeflag) } } } func inspectOptionalImage(docker client.CommonAPIClient, imageName string, platform imgutil.Platform) (types.ImageInspect, error) { var ( err error inspect types.ImageInspect ) if inspect, _, err = docker.ImageInspectWithRaw(context.Background(), imageName); err != nil { if client.IsErrNotFound(err) { return defaultInspect(platform), nil } return types.ImageInspect{}, errors.Wrapf(err, 
"verifying image %q", imageName) } return inspect, nil } func defaultInspect(platform imgutil.Platform) types.ImageInspect { return types.ImageInspect{ Os: platform.OS, Architecture: platform.Architecture, OsVersion: platform.OSVersion, Config: &container.Config{}, } } func defaultPlatform(dockerClient client.CommonAPIClient) (imgutil.Platform, error) { daemonInfo, err := dockerClient.Info(context.Background()) if err != nil { return imgutil.Platform{}, err } return imgutil.Platform{ OS: daemonInfo.OSType, Architecture: "amd64", }, nil } func v1Config(inspect types.ImageInspect) (v1.ConfigFile, error) { history := make([]v1.History, len(inspect.RootFS.Layers)) for i := range history { // zero history history[i] = v1.History{ Created: v1.Time{Time: imgutil.NormalizedDateTime}, } } diffIDs := make([]v1.Hash, len(inspect.RootFS.Layers)) for i, layer := range inspect.RootFS.Layers { hash, err := v1.NewHash(layer) if err != nil { return v1.ConfigFile{}, err } diffIDs[i] = hash } exposedPorts := make(map[string]struct{}, len(inspect.Config.ExposedPorts)) for key, val := range inspect.Config.ExposedPorts { exposedPorts[string(key)] = val } var config v1.Config if inspect.Config != nil { var healthcheck *v1.HealthConfig if inspect.Config.Healthcheck != nil { healthcheck = &v1.HealthConfig{ Test: inspect.Config.Healthcheck.Test, Interval: inspect.Config.Healthcheck.Interval, Timeout: inspect.Config.Healthcheck.Timeout, StartPeriod: inspect.Config.Healthcheck.StartPeriod, Retries: inspect.Config.Healthcheck.Retries, } } config = v1.Config{ AttachStderr: inspect.Config.AttachStderr, AttachStdin: inspect.Config.AttachStdin, AttachStdout: inspect.Config.AttachStdout, Cmd: inspect.Config.Cmd, Healthcheck: healthcheck, Domainname: inspect.Config.Domainname, Entrypoint: inspect.Config.Entrypoint, Env: inspect.Config.Env, Hostname: inspect.Config.Hostname, Image: inspect.Config.Image, Labels: inspect.Config.Labels, OnBuild: inspect.Config.OnBuild, OpenStdin: inspect.Config.OpenStdin, StdinOnce: inspect.Config.StdinOnce, Tty: inspect.Config.Tty, User: inspect.Config.User, Volumes: inspect.Config.Volumes, WorkingDir: inspect.Config.WorkingDir, ExposedPorts: exposedPorts, ArgsEscaped: inspect.Config.ArgsEscaped, NetworkDisabled: inspect.Config.NetworkDisabled, MacAddress: inspect.Config.MacAddress, StopSignal: inspect.Config.StopSignal, Shell: inspect.Config.Shell, } } return v1.ConfigFile{ Architecture: inspect.Architecture, Created: v1.Time{Time: imgutil.NormalizedDateTime}, History: history, OS: inspect.Os, OSVersion: inspect.OsVersion, RootFS: v1.RootFS{ Type: "layers", DiffIDs: diffIDs, }, Config: config, }, nil } func checkResponseError(r io.Reader) error { decoder := json.NewDecoder(r) var jsonMessage jsonmessage.JSONMessage if err := decoder.Decode(&jsonMessage); err != nil { return errors.Wrapf(err, "parsing daemon response") } if jsonMessage.Error != nil { return errors.Wrap(jsonMessage.Error, "embedded daemon response") } return nil } // ensureReaderClosed drains and closes and reader, returning the first error func ensureReaderClosed(r io.ReadCloser) error { _, err := io.Copy(ioutil.Discard, r) if closeErr := r.Close(); closeErr != nil && err == nil { err = closeErr } return err }
{
	return i.inspect.ID != ""
}
identifier_body
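The identifier_body middle above is the one-line body that the record elides from func (i *Image) Found() bool in local.go. Below is a minimal sketch of how prefix, middle, and suffix reassemble; the types here (imageInspect, Image) are simplified stand-ins for the real imgutil/local ones, kept only to make the snippet self-contained and runnable.

// Sketch: the prefix ends with the signature, the middle supplies the body,
// and the suffix continues with Identifier(), CreatedAt(), and so on.
package main

import "fmt"

type imageInspect struct {
	ID string // empty when the image was not found in the daemon
}

type Image struct {
	inspect imageInspect
}

// Found reports whether the inspected image exists in the daemon,
// exactly as in the reassembled record.
func (i *Image) Found() bool { return i.inspect.ID != "" }

func main() {
	present := &Image{inspect: imageInspect{ID: "sha256:abc"}}
	missing := &Image{}
	fmt.Println(present.Found(), missing.Found()) // true false
}

In the full file this predicate gates Delete() and downloadBaseLayersOnce(), which both return early when Found() is false.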
local.go
package local import ( "archive/tar" "context" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "sync" "time" "github.com/buildpacks/imgutil/layer" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/pkg/errors" "github.com/buildpacks/imgutil" ) type Image struct { docker client.CommonAPIClient repoName string inspect types.ImageInspect layerPaths []string prevImage *Image // reused layers will be fetched from prevImage downloadBaseOnce *sync.Once } type ImageOption func(*options) error type options struct { platform imgutil.Platform baseImageRepoName string prevImageRepoName string } //WithPreviousImage loads an existing image as a source for reusable layers. //Use with ReuseLayer(). //Ignored if image is not found. func WithPreviousImage(imageName string) ImageOption { return func(i *options) error { i.prevImageRepoName = imageName return nil } } //FromBaseImage loads an existing image as the config and layers for the new image. //Ignored if image is not found. func FromBaseImage(imageName string) ImageOption { return func(i *options) error { i.baseImageRepoName = imageName return nil } } //WithDefaultPlatform provides Architecture/OS/OSVersion defaults for the new image. //Defaults for a new image are ignored when FromBaseImage returns an image. func WithDefaultPlatform(platform imgutil.Platform) ImageOption { return func(i *options) error { i.platform = platform return nil } } //NewImage returns a new Image that can be modified and saved to a registry. func NewImage(repoName string, dockerClient client.CommonAPIClient, ops ...ImageOption) (*Image, error) { imageOpts := &options{} for _, op := range ops { if err := op(imageOpts); err != nil { return nil, err } } platform, err := defaultPlatform(dockerClient) if err != nil { return nil, err } if (imageOpts.platform != imgutil.Platform{}) { if err := validatePlatformOption(platform, imageOpts.platform); err != nil { return nil, err } platform = imageOpts.platform } inspect := defaultInspect(platform) image := &Image{ docker: dockerClient, repoName: repoName, inspect: inspect, layerPaths: make([]string, len(inspect.RootFS.Layers)), downloadBaseOnce: &sync.Once{}, } if imageOpts.prevImageRepoName != "" { if err := processPreviousImageOption(image, imageOpts.prevImageRepoName, platform, dockerClient); err != nil { return nil, err } } if imageOpts.baseImageRepoName != "" { if err := processBaseImageOption(image, imageOpts.baseImageRepoName, platform, dockerClient); err != nil { return nil, err } } if image.inspect.Os == "windows" { if err := prepareNewWindowsImage(image); err != nil { return nil, err } } return image, nil } func validatePlatformOption(defaultPlatform imgutil.Platform, optionPlatform imgutil.Platform) error { if optionPlatform.OS != "" && optionPlatform.OS != defaultPlatform.OS { return fmt.Errorf("invalid os: platform os %q must match the daemon os %q", optionPlatform.OS, defaultPlatform.OS) } return nil } func processPreviousImageOption(image *Image, prevImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { if _, err := inspectOptionalImage(dockerClient, prevImageRepoName, platform); err != nil { return err } prevImage, err := NewImage(prevImageRepoName, dockerClient, FromBaseImage(prevImageRepoName)) if err != nil { 
return errors.Wrapf(err, "getting previous image %q", prevImageRepoName) } image.prevImage = prevImage return nil } func processBaseImageOption(image *Image, baseImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { inspect, err := inspectOptionalImage(dockerClient, baseImageRepoName, platform) if err != nil { return err } image.inspect = inspect image.layerPaths = make([]string, len(image.inspect.RootFS.Layers)) return nil } func prepareNewWindowsImage(image *Image) error { // only append base layer to empty image if len(image.inspect.RootFS.Layers) > 0 { return nil } layerReader, err := layer.WindowsBaseLayer() if err != nil { return err } layerFile, err := ioutil.TempFile("", "imgutil.local.image.windowsbaselayer") if err != nil { return errors.Wrap(err, "creating temp file") } defer layerFile.Close() hasher := sha256.New() multiWriter := io.MultiWriter(layerFile, hasher) if _, err := io.Copy(multiWriter, layerReader); err != nil { return errors.Wrap(err, "copying base layer") } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(nil)) if err := image.AddLayerWithDiffID(layerFile.Name(), diffID); err != nil { return errors.Wrap(err, "adding base layer to image") } return nil } func (i *Image) Label(key string) (string, error) { labels := i.inspect.Config.Labels return labels[key], nil } func (i *Image) Labels() (map[string]string, error) { copiedLabels := make(map[string]string) for i, l := range i.inspect.Config.Labels { copiedLabels[i] = l } return copiedLabels, nil } func (i *Image) Env(key string) (string, error) { for _, envVar := range i.inspect.Config.Env { parts := strings.Split(envVar, "=") if parts[0] == key { return parts[1], nil } } return "", nil } func (i *Image) Entrypoint() ([]string, error) { return i.inspect.Config.Entrypoint, nil } func (i *Image) OS() (string, error) { return i.inspect.Os, nil } func (i *Image) OSVersion() (string, error) { return i.inspect.OsVersion, nil } func (i *Image) Architecture() (string, error) { return i.inspect.Architecture, nil } func (i *Image) Rename(name string) { i.repoName = name } func (i *Image) Name() string { return i.repoName } func (i *Image) Found() bool { return i.inspect.ID != "" } func (i *Image) Identifier() (imgutil.Identifier, error) { return IDIdentifier{ ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"), }, nil } func (i *Image) CreatedAt() (time.Time, error) { createdAtTime := i.inspect.Created createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime) if err != nil { return time.Time{}, err } return createdTime, nil } func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error { ctx := context.Background() // FIND TOP LAYER var keepLayersIdx int for idx, diffID := range i.inspect.RootFS.Layers { if diffID == baseTopLayer { keepLayersIdx = idx + 1 break } } if keepLayersIdx == 0 { return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName) } // DOWNLOAD IMAGE if err := i.downloadBaseLayersOnce(); err != nil { return err } // SWITCH BASE LAYERS newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name()) if err != nil { return errors.Wrapf(err, "read config for new base image %q", newBase) } i.inspect.ID = newBaseInspect.ID i.downloadBaseOnce = &sync.Once{} i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...) i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...) 
return nil } func (i *Image) SetLabel(key, val string) error { if i.inspect.Config.Labels == nil { i.inspect.Config.Labels = map[string]string{} } i.inspect.Config.Labels[key] = val return nil } func (i *Image) SetOS(osVal string) error { if osVal != i.inspect.Os { return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os) } return nil } func (i *Image) SetOSVersion(osVersion string) error { i.inspect.OsVersion = osVersion return nil } func (i *Image) SetArchitecture(architecture string) error { i.inspect.Architecture = architecture return nil } func (i *Image) RemoveLabel(key string) error { delete(i.inspect.Config.Labels, key) return nil } func (i *Image) SetEnv(key, val string) error { ignoreCase := i.inspect.Os == "windows" for idx, kv := range i.inspect.Config.Env { parts := strings.SplitN(kv, "=", 2) foundKey := parts[0] searchKey := key if ignoreCase { foundKey = strings.ToUpper(foundKey) searchKey = strings.ToUpper(searchKey) } if foundKey == searchKey { i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val) return nil } } i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val)) return nil } func (i *Image) SetWorkingDir(dir string) error { i.inspect.Config.WorkingDir = dir return nil } func (i *Image) SetEntrypoint(ep ...string) error { i.inspect.Config.Entrypoint = ep return nil } func (i *Image) SetCmd(cmd ...string) error { i.inspect.Config.Cmd = cmd return nil } func (i *Image) TopLayer() (string, error) { all := i.inspect.RootFS.Layers if len(all) == 0 { return "", fmt.Errorf("image %q has no layers", i.repoName) } topLayer := all[len(all)-1] return topLayer, nil } func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) { for l := range i.inspect.RootFS.Layers { if i.inspect.RootFS.Layers[l] != diffID { continue } if i.layerPaths[l] == "" { if err := i.downloadBaseLayersOnce(); err != nil { return nil, err } if i.layerPaths[l] == "" { return nil, fmt.Errorf("fetching layer %q from daemon", diffID) } } return os.Open(i.layerPaths[l]) } return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID) } func (i *Image) AddLayer(path string) error { f, err := os.Open(filepath.Clean(path)) if err != nil { return errors.Wrapf(err, "AddLayer: open layer: %s", path) } defer f.Close() hasher := sha256.New() if _, err := io.Copy(hasher, f); err != nil { return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path) } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))) return i.AddLayerWithDiffID(path, diffID) } func (i *Image) AddLayerWithDiffID(path, diffID string) error { i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID) i.layerPaths = append(i.layerPaths, path) return nil } func (i *Image) ReuseLayer(diffID string) error { if i.prevImage == nil { return errors.New("failed to reuse layer because no previous image was provided") } if !i.prevImage.Found() { return fmt.Errorf("failed to reuse layer because previous image %q was not found in daemon", i.prevImage.repoName) } if err := i.prevImage.downloadBaseLayersOnce(); err != nil { return err } for l := range i.prevImage.inspect.RootFS.Layers { if i.prevImage.inspect.RootFS.Layers[l] == diffID { return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID) } } return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name()) } func (i *Image) Save(additionalNames ...string) error { // during the first save attempt some layers may be excluded. 
The docker daemon allows this if the given set // of layers already exists in the daemon in the given order inspect, err := i.doSave() if err != nil { // populate all layer paths and try again without the above performance optimization. if err := i.downloadBaseLayersOnce(); err != nil { return err } inspect, err = i.doSave() if err != nil { saveErr := imgutil.SaveError{} for _, n := range append([]string{i.Name()}, additionalNames...) { saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } return saveErr } } i.inspect = inspect var errs []imgutil.SaveDiagnostic for _, n := range append([]string{i.Name()}, additionalNames...) { if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil { errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } } if len(errs) > 0 { return imgutil.SaveError{Errors: errs} } return nil } func (i *Image) doSave() (types.ImageInspect, error) { ctx := context.Background() done := make(chan error) t, err := name.NewTag(i.repoName, name.WeakValidation) if err != nil { return types.ImageInspect{}, err } // returns valid 'name:tag' appending 'latest', if missing tag repoName := t.Name() pr, pw := io.Pipe() defer pw.Close() go func() { res, err := i.docker.ImageLoad(ctx, pr, true) if err != nil { done <- err return } // only return response error after response is drained and closed responseErr := checkResponseError(res.Body) drainCloseErr := ensureReaderClosed(res.Body) if responseErr != nil { done <- responseErr return } if drainCloseErr != nil { done <- drainCloseErr } done <- nil }() tw := tar.NewWriter(pw) defer tw.Close() configFile, err := i.newConfigFile() if err != nil { return types.ImageInspect{}, errors.Wrap(err, "generating config file") } id := fmt.Sprintf("%x", sha256.Sum256(configFile)) if err := addTextToTar(tw, id+".json", configFile); err != nil { return types.ImageInspect{}, err } var blankIdx int var layerPaths []string for _, path := range i.layerPaths { if path == "" { layerName := fmt.Sprintf("blank_%d", blankIdx) blankIdx++ hdr := &tar.Header{Name: layerName, Mode: 0644, Size: 0} if err := tw.WriteHeader(hdr); err != nil { return types.ImageInspect{}, err } layerPaths = append(layerPaths, layerName) } else { layerName := fmt.Sprintf("/%x.tar", sha256.Sum256([]byte(path))) f, err := os.Open(filepath.Clean(path)) if err != nil { return types.ImageInspect{}, err } defer f.Close() if err := addFileToTar(tw, layerName, f); err != nil { return types.ImageInspect{}, err } f.Close() layerPaths = append(layerPaths, layerName) } } manifest, err := json.Marshal([]map[string]interface{}{ { "Config": id + ".json", "RepoTags": []string{repoName}, "Layers": layerPaths, }, }) if err != nil { return types.ImageInspect{}, err } if err := addTextToTar(tw, "manifest.json", manifest); err != nil { return types.ImageInspect{}, err } tw.Close() pw.Close() err = <-done if err != nil { return types.ImageInspect{}, errors.Wrapf(err, "loading image %q. 
first error", i.repoName) } inspect, _, err := i.docker.ImageInspectWithRaw(context.Background(), id) if err != nil { if client.IsErrNotFound(err) { return types.ImageInspect{}, errors.Wrapf(err, "saving image %q", i.repoName) } return types.ImageInspect{}, err } return inspect, nil } func (i *Image) newConfigFile() ([]byte, error) { cfg, err := v1Config(i.inspect) if err != nil { return nil, err } return json.Marshal(cfg) } func (i *Image) Delete() error { if !i.Found() { return nil } options := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := i.docker.ImageRemove(context.Background(), i.inspect.ID, options) return err } func (i *Image) ManifestSize() (int64, error) { return 0, nil } // downloadBaseLayersOnce exports the base image from the daemon and populates layerPaths the first time it is called. // subsequent calls do nothing. func (i *Image) downloadBaseLayersOnce() error { var err error if !i.Found() { return nil } i.downloadBaseOnce.Do(func() { err = i.downloadBaseLayers() }) if err != nil { return errors.Wrap(err, "fetching base layers") } return err } func (i *Image) downloadBaseLayers() error { ctx := context.Background() imageReader, err := i.docker.ImageSave(ctx, []string{i.inspect.ID}) if err != nil { return errors.Wrapf(err, "saving base image with ID %q from the docker daemon", i.inspect.ID) } defer ensureReaderClosed(imageReader) tmpDir, err := ioutil.TempDir("", "imgutil.local.image.") if err != nil { return errors.Wrap(err, "failed to create temp dir") } err = untar(imageReader, tmpDir) if err != nil { return err } mf, err := os.Open(filepath.Clean(filepath.Join(tmpDir, "manifest.json"))) if err != nil { return err } defer mf.Close() var manifest []struct { Config string Layers []string } if err := json.NewDecoder(mf).Decode(&manifest); err != nil { return err } if len(manifest) != 1 { return fmt.Errorf("manifest.json had unexpected number of entries: %d", len(manifest)) } df, err := os.Open(filepath.Clean(filepath.Join(tmpDir, manifest[0].Config))) if err != nil { return err } defer df.Close() var details struct { RootFS struct { DiffIDs []string `json:"diff_ids"` } `json:"rootfs"` } if err = json.NewDecoder(df).Decode(&details); err != nil { return err } for l := range details.RootFS.DiffIDs { i.layerPaths[l] = filepath.Join(tmpDir, manifest[0].Layers[l]) } for l := range i.layerPaths { if i.layerPaths[l] == ""
} return nil } func addTextToTar(tw *tar.Writer, name string, contents []byte) error { hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(contents))} if err := tw.WriteHeader(hdr); err != nil { return err } _, err := tw.Write(contents) return err } func addFileToTar(tw *tar.Writer, name string, contents *os.File) error { fi, err := contents.Stat() if err != nil { return err } hdr := &tar.Header{Name: name, Mode: 0644, Size: fi.Size()} if err := tw.WriteHeader(hdr); err != nil { return err } _, err = io.Copy(tw, contents) return err } func untar(r io.Reader, dest string) error { tr := tar.NewReader(r) for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive return nil } if err != nil { return err } path := filepath.Join(dest, hdr.Name) switch hdr.Typeflag { case tar.TypeDir: if err := os.MkdirAll(path, hdr.FileInfo().Mode()); err != nil { return err } case tar.TypeReg, tar.TypeRegA: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } fh, err := os.OpenFile(filepath.Clean(path), os.O_CREATE|os.O_WRONLY, hdr.FileInfo().Mode()) if err != nil { return err } if _, err := io.Copy(fh, tr); err != nil { fh.Close() return err } fh.Close() case tar.TypeSymlink: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } default: return fmt.Errorf("unknown file type in tar %d", hdr.Typeflag) } } } func inspectOptionalImage(docker client.CommonAPIClient, imageName string, platform imgutil.Platform) (types.ImageInspect, error) { var ( err error inspect types.ImageInspect ) if inspect, _, err = docker.ImageInspectWithRaw(context.Background(), imageName); err != nil { if client.IsErrNotFound(err) { return defaultInspect(platform), nil } return types.ImageInspect{}, errors.Wrapf(err, "verifying image %q", imageName) } return inspect, nil } func defaultInspect(platform imgutil.Platform) types.ImageInspect { return types.ImageInspect{ Os: platform.OS, Architecture: platform.Architecture, OsVersion: platform.OSVersion, Config: &container.Config{}, } } func defaultPlatform(dockerClient client.CommonAPIClient) (imgutil.Platform, error) { daemonInfo, err := dockerClient.Info(context.Background()) if err != nil { return imgutil.Platform{}, err } return imgutil.Platform{ OS: daemonInfo.OSType, Architecture: "amd64", }, nil } func v1Config(inspect types.ImageInspect) (v1.ConfigFile, error) { history := make([]v1.History, len(inspect.RootFS.Layers)) for i := range history { // zero history history[i] = v1.History{ Created: v1.Time{Time: imgutil.NormalizedDateTime}, } } diffIDs := make([]v1.Hash, len(inspect.RootFS.Layers)) for i, layer := range inspect.RootFS.Layers { hash, err := v1.NewHash(layer) if err != nil { return v1.ConfigFile{}, err } diffIDs[i] = hash } exposedPorts := make(map[string]struct{}, len(inspect.Config.ExposedPorts)) for key, val := range inspect.Config.ExposedPorts { exposedPorts[string(key)] = val } var config v1.Config if inspect.Config != nil { var healthcheck *v1.HealthConfig if inspect.Config.Healthcheck != nil { healthcheck = &v1.HealthConfig{ Test: inspect.Config.Healthcheck.Test, Interval: inspect.Config.Healthcheck.Interval, Timeout: inspect.Config.Healthcheck.Timeout, StartPeriod: inspect.Config.Healthcheck.StartPeriod, Retries: inspect.Config.Healthcheck.Retries, } } config = v1.Config{ AttachStderr: inspect.Config.AttachStderr, 
AttachStdin: inspect.Config.AttachStdin, AttachStdout: inspect.Config.AttachStdout, Cmd: inspect.Config.Cmd, Healthcheck: healthcheck, Domainname: inspect.Config.Domainname, Entrypoint: inspect.Config.Entrypoint, Env: inspect.Config.Env, Hostname: inspect.Config.Hostname, Image: inspect.Config.Image, Labels: inspect.Config.Labels, OnBuild: inspect.Config.OnBuild, OpenStdin: inspect.Config.OpenStdin, StdinOnce: inspect.Config.StdinOnce, Tty: inspect.Config.Tty, User: inspect.Config.User, Volumes: inspect.Config.Volumes, WorkingDir: inspect.Config.WorkingDir, ExposedPorts: exposedPorts, ArgsEscaped: inspect.Config.ArgsEscaped, NetworkDisabled: inspect.Config.NetworkDisabled, MacAddress: inspect.Config.MacAddress, StopSignal: inspect.Config.StopSignal, Shell: inspect.Config.Shell, } } return v1.ConfigFile{ Architecture: inspect.Architecture, Created: v1.Time{Time: imgutil.NormalizedDateTime}, History: history, OS: inspect.Os, OSVersion: inspect.OsVersion, RootFS: v1.RootFS{ Type: "layers", DiffIDs: diffIDs, }, Config: config, }, nil } func checkResponseError(r io.Reader) error { decoder := json.NewDecoder(r) var jsonMessage jsonmessage.JSONMessage if err := decoder.Decode(&jsonMessage); err != nil { return errors.Wrapf(err, "parsing daemon response") } if jsonMessage.Error != nil { return errors.Wrap(jsonMessage.Error, "embedded daemon response") } return nil } // ensureReaderClosed drains and closes and reader, returning the first error func ensureReaderClosed(r io.ReadCloser) error { _, err := io.Copy(ioutil.Discard, r) if closeErr := r.Close(); closeErr != nil && err == nil { err = closeErr } return err }
{
	return errors.New("failed to download all base layers from daemon")
}
conditional_block
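The conditional_block middle above completes the final validation loop of downloadBaseLayers in local.go: after the manifest's layers have been mapped into layerPaths, any empty entry aborts with an error. A minimal sketch of the reassembled check follows; checkLayerPaths is a hypothetical stand-alone helper used here for illustration, not the real (*Image).downloadBaseLayers method.

package main

import (
	"errors"
	"fmt"
)

// checkLayerPaths mirrors the loop the record completes: every diff ID must
// have been resolved to an extracted layer path, otherwise the download of
// base layers from the daemon is treated as failed.
func checkLayerPaths(layerPaths []string) error {
	for l := range layerPaths {
		if layerPaths[l] == "" {
			return errors.New("failed to download all base layers from daemon")
		}
	}
	return nil
}

func main() {
	fmt.Println(checkLayerPaths([]string{"/tmp/a.tar", "/tmp/b.tar"})) // <nil>
	fmt.Println(checkLayerPaths([]string{"/tmp/a.tar", ""}))           // error
}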
local.go
package local import ( "archive/tar" "context" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "sync" "time" "github.com/buildpacks/imgutil/layer" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/pkg/errors" "github.com/buildpacks/imgutil" ) type Image struct { docker client.CommonAPIClient repoName string inspect types.ImageInspect layerPaths []string prevImage *Image // reused layers will be fetched from prevImage downloadBaseOnce *sync.Once } type ImageOption func(*options) error type options struct { platform imgutil.Platform baseImageRepoName string prevImageRepoName string } //WithPreviousImage loads an existing image as a source for reusable layers. //Use with ReuseLayer(). //Ignored if image is not found. func WithPreviousImage(imageName string) ImageOption { return func(i *options) error { i.prevImageRepoName = imageName return nil } } //FromBaseImage loads an existing image as the config and layers for the new image. //Ignored if image is not found. func FromBaseImage(imageName string) ImageOption { return func(i *options) error { i.baseImageRepoName = imageName return nil } } //WithDefaultPlatform provides Architecture/OS/OSVersion defaults for the new image. //Defaults for a new image are ignored when FromBaseImage returns an image. func WithDefaultPlatform(platform imgutil.Platform) ImageOption { return func(i *options) error { i.platform = platform return nil } } //NewImage returns a new Image that can be modified and saved to a registry. func NewImage(repoName string, dockerClient client.CommonAPIClient, ops ...ImageOption) (*Image, error) { imageOpts := &options{} for _, op := range ops { if err := op(imageOpts); err != nil { return nil, err } } platform, err := defaultPlatform(dockerClient) if err != nil { return nil, err } if (imageOpts.platform != imgutil.Platform{}) { if err := validatePlatformOption(platform, imageOpts.platform); err != nil { return nil, err } platform = imageOpts.platform } inspect := defaultInspect(platform) image := &Image{ docker: dockerClient, repoName: repoName, inspect: inspect, layerPaths: make([]string, len(inspect.RootFS.Layers)), downloadBaseOnce: &sync.Once{}, } if imageOpts.prevImageRepoName != "" { if err := processPreviousImageOption(image, imageOpts.prevImageRepoName, platform, dockerClient); err != nil { return nil, err } } if imageOpts.baseImageRepoName != "" { if err := processBaseImageOption(image, imageOpts.baseImageRepoName, platform, dockerClient); err != nil { return nil, err } } if image.inspect.Os == "windows" { if err := prepareNewWindowsImage(image); err != nil { return nil, err } } return image, nil } func
(defaultPlatform imgutil.Platform, optionPlatform imgutil.Platform) error { if optionPlatform.OS != "" && optionPlatform.OS != defaultPlatform.OS { return fmt.Errorf("invalid os: platform os %q must match the daemon os %q", optionPlatform.OS, defaultPlatform.OS) } return nil } func processPreviousImageOption(image *Image, prevImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { if _, err := inspectOptionalImage(dockerClient, prevImageRepoName, platform); err != nil { return err } prevImage, err := NewImage(prevImageRepoName, dockerClient, FromBaseImage(prevImageRepoName)) if err != nil { return errors.Wrapf(err, "getting previous image %q", prevImageRepoName) } image.prevImage = prevImage return nil } func processBaseImageOption(image *Image, baseImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { inspect, err := inspectOptionalImage(dockerClient, baseImageRepoName, platform) if err != nil { return err } image.inspect = inspect image.layerPaths = make([]string, len(image.inspect.RootFS.Layers)) return nil } func prepareNewWindowsImage(image *Image) error { // only append base layer to empty image if len(image.inspect.RootFS.Layers) > 0 { return nil } layerReader, err := layer.WindowsBaseLayer() if err != nil { return err } layerFile, err := ioutil.TempFile("", "imgutil.local.image.windowsbaselayer") if err != nil { return errors.Wrap(err, "creating temp file") } defer layerFile.Close() hasher := sha256.New() multiWriter := io.MultiWriter(layerFile, hasher) if _, err := io.Copy(multiWriter, layerReader); err != nil { return errors.Wrap(err, "copying base layer") } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(nil)) if err := image.AddLayerWithDiffID(layerFile.Name(), diffID); err != nil { return errors.Wrap(err, "adding base layer to image") } return nil } func (i *Image) Label(key string) (string, error) { labels := i.inspect.Config.Labels return labels[key], nil } func (i *Image) Labels() (map[string]string, error) { copiedLabels := make(map[string]string) for i, l := range i.inspect.Config.Labels { copiedLabels[i] = l } return copiedLabels, nil } func (i *Image) Env(key string) (string, error) { for _, envVar := range i.inspect.Config.Env { parts := strings.Split(envVar, "=") if parts[0] == key { return parts[1], nil } } return "", nil } func (i *Image) Entrypoint() ([]string, error) { return i.inspect.Config.Entrypoint, nil } func (i *Image) OS() (string, error) { return i.inspect.Os, nil } func (i *Image) OSVersion() (string, error) { return i.inspect.OsVersion, nil } func (i *Image) Architecture() (string, error) { return i.inspect.Architecture, nil } func (i *Image) Rename(name string) { i.repoName = name } func (i *Image) Name() string { return i.repoName } func (i *Image) Found() bool { return i.inspect.ID != "" } func (i *Image) Identifier() (imgutil.Identifier, error) { return IDIdentifier{ ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"), }, nil } func (i *Image) CreatedAt() (time.Time, error) { createdAtTime := i.inspect.Created createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime) if err != nil { return time.Time{}, err } return createdTime, nil } func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error { ctx := context.Background() // FIND TOP LAYER var keepLayersIdx int for idx, diffID := range i.inspect.RootFS.Layers { if diffID == baseTopLayer { keepLayersIdx = idx + 1 break } } if keepLayersIdx == 0 { return fmt.Errorf("%q not found in %q during rebase", 
baseTopLayer, i.repoName) } // DOWNLOAD IMAGE if err := i.downloadBaseLayersOnce(); err != nil { return err } // SWITCH BASE LAYERS newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name()) if err != nil { return errors.Wrapf(err, "read config for new base image %q", newBase) } i.inspect.ID = newBaseInspect.ID i.downloadBaseOnce = &sync.Once{} i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...) i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...) return nil } func (i *Image) SetLabel(key, val string) error { if i.inspect.Config.Labels == nil { i.inspect.Config.Labels = map[string]string{} } i.inspect.Config.Labels[key] = val return nil } func (i *Image) SetOS(osVal string) error { if osVal != i.inspect.Os { return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os) } return nil } func (i *Image) SetOSVersion(osVersion string) error { i.inspect.OsVersion = osVersion return nil } func (i *Image) SetArchitecture(architecture string) error { i.inspect.Architecture = architecture return nil } func (i *Image) RemoveLabel(key string) error { delete(i.inspect.Config.Labels, key) return nil } func (i *Image) SetEnv(key, val string) error { ignoreCase := i.inspect.Os == "windows" for idx, kv := range i.inspect.Config.Env { parts := strings.SplitN(kv, "=", 2) foundKey := parts[0] searchKey := key if ignoreCase { foundKey = strings.ToUpper(foundKey) searchKey = strings.ToUpper(searchKey) } if foundKey == searchKey { i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val) return nil } } i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val)) return nil } func (i *Image) SetWorkingDir(dir string) error { i.inspect.Config.WorkingDir = dir return nil } func (i *Image) SetEntrypoint(ep ...string) error { i.inspect.Config.Entrypoint = ep return nil } func (i *Image) SetCmd(cmd ...string) error { i.inspect.Config.Cmd = cmd return nil } func (i *Image) TopLayer() (string, error) { all := i.inspect.RootFS.Layers if len(all) == 0 { return "", fmt.Errorf("image %q has no layers", i.repoName) } topLayer := all[len(all)-1] return topLayer, nil } func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) { for l := range i.inspect.RootFS.Layers { if i.inspect.RootFS.Layers[l] != diffID { continue } if i.layerPaths[l] == "" { if err := i.downloadBaseLayersOnce(); err != nil { return nil, err } if i.layerPaths[l] == "" { return nil, fmt.Errorf("fetching layer %q from daemon", diffID) } } return os.Open(i.layerPaths[l]) } return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID) } func (i *Image) AddLayer(path string) error { f, err := os.Open(filepath.Clean(path)) if err != nil { return errors.Wrapf(err, "AddLayer: open layer: %s", path) } defer f.Close() hasher := sha256.New() if _, err := io.Copy(hasher, f); err != nil { return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path) } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))) return i.AddLayerWithDiffID(path, diffID) } func (i *Image) AddLayerWithDiffID(path, diffID string) error { i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID) i.layerPaths = append(i.layerPaths, path) return nil } func (i *Image) ReuseLayer(diffID string) error { if i.prevImage == nil { return errors.New("failed to reuse layer because no previous image was provided") } if !i.prevImage.Found() { return fmt.Errorf("failed to 
reuse layer because previous image %q was not found in daemon", i.prevImage.repoName) } if err := i.prevImage.downloadBaseLayersOnce(); err != nil { return err } for l := range i.prevImage.inspect.RootFS.Layers { if i.prevImage.inspect.RootFS.Layers[l] == diffID { return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID) } } return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name()) } func (i *Image) Save(additionalNames ...string) error { // during the first save attempt some layers may be excluded. The docker daemon allows this if the given set // of layers already exists in the daemon in the given order inspect, err := i.doSave() if err != nil { // populate all layer paths and try again without the above performance optimization. if err := i.downloadBaseLayersOnce(); err != nil { return err } inspect, err = i.doSave() if err != nil { saveErr := imgutil.SaveError{} for _, n := range append([]string{i.Name()}, additionalNames...) { saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } return saveErr } } i.inspect = inspect var errs []imgutil.SaveDiagnostic for _, n := range append([]string{i.Name()}, additionalNames...) { if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil { errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } } if len(errs) > 0 { return imgutil.SaveError{Errors: errs} } return nil } func (i *Image) doSave() (types.ImageInspect, error) { ctx := context.Background() done := make(chan error) t, err := name.NewTag(i.repoName, name.WeakValidation) if err != nil { return types.ImageInspect{}, err } // returns valid 'name:tag' appending 'latest', if missing tag repoName := t.Name() pr, pw := io.Pipe() defer pw.Close() go func() { res, err := i.docker.ImageLoad(ctx, pr, true) if err != nil { done <- err return } // only return response error after response is drained and closed responseErr := checkResponseError(res.Body) drainCloseErr := ensureReaderClosed(res.Body) if responseErr != nil { done <- responseErr return } if drainCloseErr != nil { done <- drainCloseErr } done <- nil }() tw := tar.NewWriter(pw) defer tw.Close() configFile, err := i.newConfigFile() if err != nil { return types.ImageInspect{}, errors.Wrap(err, "generating config file") } id := fmt.Sprintf("%x", sha256.Sum256(configFile)) if err := addTextToTar(tw, id+".json", configFile); err != nil { return types.ImageInspect{}, err } var blankIdx int var layerPaths []string for _, path := range i.layerPaths { if path == "" { layerName := fmt.Sprintf("blank_%d", blankIdx) blankIdx++ hdr := &tar.Header{Name: layerName, Mode: 0644, Size: 0} if err := tw.WriteHeader(hdr); err != nil { return types.ImageInspect{}, err } layerPaths = append(layerPaths, layerName) } else { layerName := fmt.Sprintf("/%x.tar", sha256.Sum256([]byte(path))) f, err := os.Open(filepath.Clean(path)) if err != nil { return types.ImageInspect{}, err } defer f.Close() if err := addFileToTar(tw, layerName, f); err != nil { return types.ImageInspect{}, err } f.Close() layerPaths = append(layerPaths, layerName) } } manifest, err := json.Marshal([]map[string]interface{}{ { "Config": id + ".json", "RepoTags": []string{repoName}, "Layers": layerPaths, }, }) if err != nil { return types.ImageInspect{}, err } if err := addTextToTar(tw, "manifest.json", manifest); err != nil { return types.ImageInspect{}, err } tw.Close() pw.Close() err = <-done if err != nil { return types.ImageInspect{}, errors.Wrapf(err, "loading image %q. 
first error", i.repoName) } inspect, _, err := i.docker.ImageInspectWithRaw(context.Background(), id) if err != nil { if client.IsErrNotFound(err) { return types.ImageInspect{}, errors.Wrapf(err, "saving image %q", i.repoName) } return types.ImageInspect{}, err } return inspect, nil } func (i *Image) newConfigFile() ([]byte, error) { cfg, err := v1Config(i.inspect) if err != nil { return nil, err } return json.Marshal(cfg) } func (i *Image) Delete() error { if !i.Found() { return nil } options := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := i.docker.ImageRemove(context.Background(), i.inspect.ID, options) return err } func (i *Image) ManifestSize() (int64, error) { return 0, nil } // downloadBaseLayersOnce exports the base image from the daemon and populates layerPaths the first time it is called. // subsequent calls do nothing. func (i *Image) downloadBaseLayersOnce() error { var err error if !i.Found() { return nil } i.downloadBaseOnce.Do(func() { err = i.downloadBaseLayers() }) if err != nil { return errors.Wrap(err, "fetching base layers") } return err } func (i *Image) downloadBaseLayers() error { ctx := context.Background() imageReader, err := i.docker.ImageSave(ctx, []string{i.inspect.ID}) if err != nil { return errors.Wrapf(err, "saving base image with ID %q from the docker daemon", i.inspect.ID) } defer ensureReaderClosed(imageReader) tmpDir, err := ioutil.TempDir("", "imgutil.local.image.") if err != nil { return errors.Wrap(err, "failed to create temp dir") } err = untar(imageReader, tmpDir) if err != nil { return err } mf, err := os.Open(filepath.Clean(filepath.Join(tmpDir, "manifest.json"))) if err != nil { return err } defer mf.Close() var manifest []struct { Config string Layers []string } if err := json.NewDecoder(mf).Decode(&manifest); err != nil { return err } if len(manifest) != 1 { return fmt.Errorf("manifest.json had unexpected number of entries: %d", len(manifest)) } df, err := os.Open(filepath.Clean(filepath.Join(tmpDir, manifest[0].Config))) if err != nil { return err } defer df.Close() var details struct { RootFS struct { DiffIDs []string `json:"diff_ids"` } `json:"rootfs"` } if err = json.NewDecoder(df).Decode(&details); err != nil { return err } for l := range details.RootFS.DiffIDs { i.layerPaths[l] = filepath.Join(tmpDir, manifest[0].Layers[l]) } for l := range i.layerPaths { if i.layerPaths[l] == "" { return errors.New("failed to download all base layers from daemon") } } return nil } func addTextToTar(tw *tar.Writer, name string, contents []byte) error { hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(contents))} if err := tw.WriteHeader(hdr); err != nil { return err } _, err := tw.Write(contents) return err } func addFileToTar(tw *tar.Writer, name string, contents *os.File) error { fi, err := contents.Stat() if err != nil { return err } hdr := &tar.Header{Name: name, Mode: 0644, Size: fi.Size()} if err := tw.WriteHeader(hdr); err != nil { return err } _, err = io.Copy(tw, contents) return err } func untar(r io.Reader, dest string) error { tr := tar.NewReader(r) for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive return nil } if err != nil { return err } path := filepath.Join(dest, hdr.Name) switch hdr.Typeflag { case tar.TypeDir: if err := os.MkdirAll(path, hdr.FileInfo().Mode()); err != nil { return err } case tar.TypeReg, tar.TypeRegA: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } fh, err := 
os.OpenFile(filepath.Clean(path), os.O_CREATE|os.O_WRONLY, hdr.FileInfo().Mode()) if err != nil { return err } if _, err := io.Copy(fh, tr); err != nil { fh.Close() return err } fh.Close() case tar.TypeSymlink: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } default: return fmt.Errorf("unknown file type in tar %d", hdr.Typeflag) } } } func inspectOptionalImage(docker client.CommonAPIClient, imageName string, platform imgutil.Platform) (types.ImageInspect, error) { var ( err error inspect types.ImageInspect ) if inspect, _, err = docker.ImageInspectWithRaw(context.Background(), imageName); err != nil { if client.IsErrNotFound(err) { return defaultInspect(platform), nil } return types.ImageInspect{}, errors.Wrapf(err, "verifying image %q", imageName) } return inspect, nil } func defaultInspect(platform imgutil.Platform) types.ImageInspect { return types.ImageInspect{ Os: platform.OS, Architecture: platform.Architecture, OsVersion: platform.OSVersion, Config: &container.Config{}, } } func defaultPlatform(dockerClient client.CommonAPIClient) (imgutil.Platform, error) { daemonInfo, err := dockerClient.Info(context.Background()) if err != nil { return imgutil.Platform{}, err } return imgutil.Platform{ OS: daemonInfo.OSType, Architecture: "amd64", }, nil } func v1Config(inspect types.ImageInspect) (v1.ConfigFile, error) { history := make([]v1.History, len(inspect.RootFS.Layers)) for i := range history { // zero history history[i] = v1.History{ Created: v1.Time{Time: imgutil.NormalizedDateTime}, } } diffIDs := make([]v1.Hash, len(inspect.RootFS.Layers)) for i, layer := range inspect.RootFS.Layers { hash, err := v1.NewHash(layer) if err != nil { return v1.ConfigFile{}, err } diffIDs[i] = hash } exposedPorts := make(map[string]struct{}, len(inspect.Config.ExposedPorts)) for key, val := range inspect.Config.ExposedPorts { exposedPorts[string(key)] = val } var config v1.Config if inspect.Config != nil { var healthcheck *v1.HealthConfig if inspect.Config.Healthcheck != nil { healthcheck = &v1.HealthConfig{ Test: inspect.Config.Healthcheck.Test, Interval: inspect.Config.Healthcheck.Interval, Timeout: inspect.Config.Healthcheck.Timeout, StartPeriod: inspect.Config.Healthcheck.StartPeriod, Retries: inspect.Config.Healthcheck.Retries, } } config = v1.Config{ AttachStderr: inspect.Config.AttachStderr, AttachStdin: inspect.Config.AttachStdin, AttachStdout: inspect.Config.AttachStdout, Cmd: inspect.Config.Cmd, Healthcheck: healthcheck, Domainname: inspect.Config.Domainname, Entrypoint: inspect.Config.Entrypoint, Env: inspect.Config.Env, Hostname: inspect.Config.Hostname, Image: inspect.Config.Image, Labels: inspect.Config.Labels, OnBuild: inspect.Config.OnBuild, OpenStdin: inspect.Config.OpenStdin, StdinOnce: inspect.Config.StdinOnce, Tty: inspect.Config.Tty, User: inspect.Config.User, Volumes: inspect.Config.Volumes, WorkingDir: inspect.Config.WorkingDir, ExposedPorts: exposedPorts, ArgsEscaped: inspect.Config.ArgsEscaped, NetworkDisabled: inspect.Config.NetworkDisabled, MacAddress: inspect.Config.MacAddress, StopSignal: inspect.Config.StopSignal, Shell: inspect.Config.Shell, } } return v1.ConfigFile{ Architecture: inspect.Architecture, Created: v1.Time{Time: imgutil.NormalizedDateTime}, History: history, OS: inspect.Os, OSVersion: inspect.OsVersion, RootFS: v1.RootFS{ Type: "layers", DiffIDs: diffIDs, }, Config: config, }, nil } func 
checkResponseError(r io.Reader) error { decoder := json.NewDecoder(r) var jsonMessage jsonmessage.JSONMessage if err := decoder.Decode(&jsonMessage); err != nil { return errors.Wrapf(err, "parsing daemon response") } if jsonMessage.Error != nil { return errors.Wrap(jsonMessage.Error, "embedded daemon response") } return nil } // ensureReaderClosed drains and closes a reader, returning the first error func ensureReaderClosed(r io.ReadCloser) error { _, err := io.Copy(ioutil.Discard, r) if closeErr := r.Close(); closeErr != nil && err == nil { err = closeErr } return err }
validatePlatformOption
identifier_name
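The record above fills in the identifier validatePlatformOption for local.go from buildpacks/imgutil. For orientation, the sketch below shows how the surrounding API is used: WithDefaultPlatform supplies the platform that validatePlatformOption checks against the daemon, and a mismatched OS is rejected while Architecture/OSVersion may override the defaults. The import path, the Docker client options, and the image name are assumptions for illustration; only NewImage, WithDefaultPlatform, imgutil.Platform, and the OS-match rule come from the snippet itself.

package main

import (
	"fmt"
	"log"

	"github.com/buildpacks/imgutil"
	"github.com/buildpacks/imgutil/local" // assumed import path for package local
	"github.com/docker/docker/client"
)

func main() {
	// Standard docker client setup (not part of the record above).
	dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	// WithDefaultPlatform feeds validatePlatformOption: the requested OS must
	// match the daemon OS, while Architecture/OSVersion may differ from the defaults.
	img, err := local.NewImage(
		"example.com/app:latest", // hypothetical repository name
		dockerClient,
		local.WithDefaultPlatform(imgutil.Platform{OS: "linux", Architecture: "arm64"}),
	)
	if err != nil {
		// e.g. `invalid os: platform os "windows" must match the daemon os "linux"`
		log.Fatal(err)
	}
	fmt.Println("created image handle for", img.Name())
}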
local.go
package local import ( "archive/tar" "context" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "sync" "time" "github.com/buildpacks/imgutil/layer" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/pkg/errors" "github.com/buildpacks/imgutil" ) type Image struct { docker client.CommonAPIClient repoName string inspect types.ImageInspect layerPaths []string prevImage *Image // reused layers will be fetched from prevImage downloadBaseOnce *sync.Once } type ImageOption func(*options) error type options struct { platform imgutil.Platform baseImageRepoName string prevImageRepoName string } //WithPreviousImage loads an existing image as a source for reusable layers. //Use with ReuseLayer(). //Ignored if image is not found. func WithPreviousImage(imageName string) ImageOption { return func(i *options) error { i.prevImageRepoName = imageName return nil } } //FromBaseImage loads an existing image as the config and layers for the new image. //Ignored if image is not found. func FromBaseImage(imageName string) ImageOption { return func(i *options) error { i.baseImageRepoName = imageName return nil } } //WithDefaultPlatform provides Architecture/OS/OSVersion defaults for the new image. //Defaults for a new image are ignored when FromBaseImage returns an image. func WithDefaultPlatform(platform imgutil.Platform) ImageOption { return func(i *options) error { i.platform = platform return nil } } //NewImage returns a new Image that can be modified and saved to a registry. func NewImage(repoName string, dockerClient client.CommonAPIClient, ops ...ImageOption) (*Image, error) { imageOpts := &options{} for _, op := range ops { if err := op(imageOpts); err != nil { return nil, err } } platform, err := defaultPlatform(dockerClient) if err != nil { return nil, err } if (imageOpts.platform != imgutil.Platform{}) { if err := validatePlatformOption(platform, imageOpts.platform); err != nil { return nil, err } platform = imageOpts.platform } inspect := defaultInspect(platform) image := &Image{ docker: dockerClient, repoName: repoName, inspect: inspect, layerPaths: make([]string, len(inspect.RootFS.Layers)), downloadBaseOnce: &sync.Once{}, } if imageOpts.prevImageRepoName != "" { if err := processPreviousImageOption(image, imageOpts.prevImageRepoName, platform, dockerClient); err != nil { return nil, err } } if imageOpts.baseImageRepoName != "" { if err := processBaseImageOption(image, imageOpts.baseImageRepoName, platform, dockerClient); err != nil { return nil, err } } if image.inspect.Os == "windows" { if err := prepareNewWindowsImage(image); err != nil { return nil, err } } return image, nil } func validatePlatformOption(defaultPlatform imgutil.Platform, optionPlatform imgutil.Platform) error { if optionPlatform.OS != "" && optionPlatform.OS != defaultPlatform.OS { return fmt.Errorf("invalid os: platform os %q must match the daemon os %q", optionPlatform.OS, defaultPlatform.OS) } return nil } func processPreviousImageOption(image *Image, prevImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { if _, err := inspectOptionalImage(dockerClient, prevImageRepoName, platform); err != nil { return err } prevImage, err := NewImage(prevImageRepoName, dockerClient, FromBaseImage(prevImageRepoName)) if err != nil { 
return errors.Wrapf(err, "getting previous image %q", prevImageRepoName) } image.prevImage = prevImage return nil } func processBaseImageOption(image *Image, baseImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { inspect, err := inspectOptionalImage(dockerClient, baseImageRepoName, platform) if err != nil { return err } image.inspect = inspect image.layerPaths = make([]string, len(image.inspect.RootFS.Layers)) return nil } func prepareNewWindowsImage(image *Image) error { // only append base layer to empty image if len(image.inspect.RootFS.Layers) > 0 { return nil } layerReader, err := layer.WindowsBaseLayer() if err != nil { return err } layerFile, err := ioutil.TempFile("", "imgutil.local.image.windowsbaselayer") if err != nil { return errors.Wrap(err, "creating temp file") } defer layerFile.Close() hasher := sha256.New() multiWriter := io.MultiWriter(layerFile, hasher) if _, err := io.Copy(multiWriter, layerReader); err != nil { return errors.Wrap(err, "copying base layer") } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(nil)) if err := image.AddLayerWithDiffID(layerFile.Name(), diffID); err != nil { return errors.Wrap(err, "adding base layer to image") } return nil } func (i *Image) Label(key string) (string, error) { labels := i.inspect.Config.Labels return labels[key], nil } func (i *Image) Labels() (map[string]string, error) { copiedLabels := make(map[string]string) for i, l := range i.inspect.Config.Labels { copiedLabels[i] = l } return copiedLabels, nil } func (i *Image) Env(key string) (string, error) { for _, envVar := range i.inspect.Config.Env { parts := strings.Split(envVar, "=") if parts[0] == key { return parts[1], nil } } return "", nil } func (i *Image) Entrypoint() ([]string, error) { return i.inspect.Config.Entrypoint, nil } func (i *Image) OS() (string, error) { return i.inspect.Os, nil } func (i *Image) OSVersion() (string, error) { return i.inspect.OsVersion, nil } func (i *Image) Architecture() (string, error) { return i.inspect.Architecture, nil } func (i *Image) Rename(name string) { i.repoName = name } func (i *Image) Name() string { return i.repoName } func (i *Image) Found() bool { return i.inspect.ID != "" } func (i *Image) Identifier() (imgutil.Identifier, error) { return IDIdentifier{ ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"), }, nil } func (i *Image) CreatedAt() (time.Time, error) { createdAtTime := i.inspect.Created createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime) if err != nil { return time.Time{}, err } return createdTime, nil } func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error { ctx := context.Background() // FIND TOP LAYER var keepLayersIdx int for idx, diffID := range i.inspect.RootFS.Layers { if diffID == baseTopLayer { keepLayersIdx = idx + 1 break } } if keepLayersIdx == 0 { return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName) } // DOWNLOAD IMAGE if err := i.downloadBaseLayersOnce(); err != nil { return err } // SWITCH BASE LAYERS newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name()) if err != nil { return errors.Wrapf(err, "read config for new base image %q", newBase) } i.inspect.ID = newBaseInspect.ID i.downloadBaseOnce = &sync.Once{} i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...) i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...) 
return nil } func (i *Image) SetLabel(key, val string) error { if i.inspect.Config.Labels == nil { i.inspect.Config.Labels = map[string]string{} } i.inspect.Config.Labels[key] = val return nil } func (i *Image) SetOS(osVal string) error { if osVal != i.inspect.Os { return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os) } return nil } func (i *Image) SetOSVersion(osVersion string) error { i.inspect.OsVersion = osVersion return nil } func (i *Image) SetArchitecture(architecture string) error { i.inspect.Architecture = architecture return nil } func (i *Image) RemoveLabel(key string) error {
ignoreCase := i.inspect.Os == "windows" for idx, kv := range i.inspect.Config.Env { parts := strings.SplitN(kv, "=", 2) foundKey := parts[0] searchKey := key if ignoreCase { foundKey = strings.ToUpper(foundKey) searchKey = strings.ToUpper(searchKey) } if foundKey == searchKey { i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val) return nil } } i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val)) return nil } func (i *Image) SetWorkingDir(dir string) error { i.inspect.Config.WorkingDir = dir return nil } func (i *Image) SetEntrypoint(ep ...string) error { i.inspect.Config.Entrypoint = ep return nil } func (i *Image) SetCmd(cmd ...string) error { i.inspect.Config.Cmd = cmd return nil } func (i *Image) TopLayer() (string, error) { all := i.inspect.RootFS.Layers if len(all) == 0 { return "", fmt.Errorf("image %q has no layers", i.repoName) } topLayer := all[len(all)-1] return topLayer, nil } func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) { for l := range i.inspect.RootFS.Layers { if i.inspect.RootFS.Layers[l] != diffID { continue } if i.layerPaths[l] == "" { if err := i.downloadBaseLayersOnce(); err != nil { return nil, err } if i.layerPaths[l] == "" { return nil, fmt.Errorf("fetching layer %q from daemon", diffID) } } return os.Open(i.layerPaths[l]) } return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID) } func (i *Image) AddLayer(path string) error { f, err := os.Open(filepath.Clean(path)) if err != nil { return errors.Wrapf(err, "AddLayer: open layer: %s", path) } defer f.Close() hasher := sha256.New() if _, err := io.Copy(hasher, f); err != nil { return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path) } diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))) return i.AddLayerWithDiffID(path, diffID) } func (i *Image) AddLayerWithDiffID(path, diffID string) error { i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID) i.layerPaths = append(i.layerPaths, path) return nil } func (i *Image) ReuseLayer(diffID string) error { if i.prevImage == nil { return errors.New("failed to reuse layer because no previous image was provided") } if !i.prevImage.Found() { return fmt.Errorf("failed to reuse layer because previous image %q was not found in daemon", i.prevImage.repoName) } if err := i.prevImage.downloadBaseLayersOnce(); err != nil { return err } for l := range i.prevImage.inspect.RootFS.Layers { if i.prevImage.inspect.RootFS.Layers[l] == diffID { return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID) } } return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name()) } func (i *Image) Save(additionalNames ...string) error { // during the first save attempt some layers may be excluded. The docker daemon allows this if the given set // of layers already exists in the daemon in the given order inspect, err := i.doSave() if err != nil { // populate all layer paths and try again without the above performance optimization. if err := i.downloadBaseLayersOnce(); err != nil { return err } inspect, err = i.doSave() if err != nil { saveErr := imgutil.SaveError{} for _, n := range append([]string{i.Name()}, additionalNames...) { saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } return saveErr } } i.inspect = inspect var errs []imgutil.SaveDiagnostic for _, n := range append([]string{i.Name()}, additionalNames...) 
{ if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil { errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) } } if len(errs) > 0 { return imgutil.SaveError{Errors: errs} } return nil } func (i *Image) doSave() (types.ImageInspect, error) { ctx := context.Background() done := make(chan error) t, err := name.NewTag(i.repoName, name.WeakValidation) if err != nil { return types.ImageInspect{}, err } // returns valid 'name:tag' appending 'latest', if missing tag repoName := t.Name() pr, pw := io.Pipe() defer pw.Close() go func() { res, err := i.docker.ImageLoad(ctx, pr, true) if err != nil { done <- err return } // only return response error after response is drained and closed responseErr := checkResponseError(res.Body) drainCloseErr := ensureReaderClosed(res.Body) if responseErr != nil { done <- responseErr return } if drainCloseErr != nil { done <- drainCloseErr } done <- nil }() tw := tar.NewWriter(pw) defer tw.Close() configFile, err := i.newConfigFile() if err != nil { return types.ImageInspect{}, errors.Wrap(err, "generating config file") } id := fmt.Sprintf("%x", sha256.Sum256(configFile)) if err := addTextToTar(tw, id+".json", configFile); err != nil { return types.ImageInspect{}, err } var blankIdx int var layerPaths []string for _, path := range i.layerPaths { if path == "" { layerName := fmt.Sprintf("blank_%d", blankIdx) blankIdx++ hdr := &tar.Header{Name: layerName, Mode: 0644, Size: 0} if err := tw.WriteHeader(hdr); err != nil { return types.ImageInspect{}, err } layerPaths = append(layerPaths, layerName) } else { layerName := fmt.Sprintf("/%x.tar", sha256.Sum256([]byte(path))) f, err := os.Open(filepath.Clean(path)) if err != nil { return types.ImageInspect{}, err } defer f.Close() if err := addFileToTar(tw, layerName, f); err != nil { return types.ImageInspect{}, err } f.Close() layerPaths = append(layerPaths, layerName) } } manifest, err := json.Marshal([]map[string]interface{}{ { "Config": id + ".json", "RepoTags": []string{repoName}, "Layers": layerPaths, }, }) if err != nil { return types.ImageInspect{}, err } if err := addTextToTar(tw, "manifest.json", manifest); err != nil { return types.ImageInspect{}, err } tw.Close() pw.Close() err = <-done if err != nil { return types.ImageInspect{}, errors.Wrapf(err, "loading image %q. first error", i.repoName) } inspect, _, err := i.docker.ImageInspectWithRaw(context.Background(), id) if err != nil { if client.IsErrNotFound(err) { return types.ImageInspect{}, errors.Wrapf(err, "saving image %q", i.repoName) } return types.ImageInspect{}, err } return inspect, nil } func (i *Image) newConfigFile() ([]byte, error) { cfg, err := v1Config(i.inspect) if err != nil { return nil, err } return json.Marshal(cfg) } func (i *Image) Delete() error { if !i.Found() { return nil } options := types.ImageRemoveOptions{ Force: true, PruneChildren: true, } _, err := i.docker.ImageRemove(context.Background(), i.inspect.ID, options) return err } func (i *Image) ManifestSize() (int64, error) { return 0, nil } // downloadBaseLayersOnce exports the base image from the daemon and populates layerPaths the first time it is called. // subsequent calls do nothing. 
func (i *Image) downloadBaseLayersOnce() error { var err error if !i.Found() { return nil } i.downloadBaseOnce.Do(func() { err = i.downloadBaseLayers() }) if err != nil { return errors.Wrap(err, "fetching base layers") } return err } func (i *Image) downloadBaseLayers() error { ctx := context.Background() imageReader, err := i.docker.ImageSave(ctx, []string{i.inspect.ID}) if err != nil { return errors.Wrapf(err, "saving base image with ID %q from the docker daemon", i.inspect.ID) } defer ensureReaderClosed(imageReader) tmpDir, err := ioutil.TempDir("", "imgutil.local.image.") if err != nil { return errors.Wrap(err, "failed to create temp dir") } err = untar(imageReader, tmpDir) if err != nil { return err } mf, err := os.Open(filepath.Clean(filepath.Join(tmpDir, "manifest.json"))) if err != nil { return err } defer mf.Close() var manifest []struct { Config string Layers []string } if err := json.NewDecoder(mf).Decode(&manifest); err != nil { return err } if len(manifest) != 1 { return fmt.Errorf("manifest.json had unexpected number of entries: %d", len(manifest)) } df, err := os.Open(filepath.Clean(filepath.Join(tmpDir, manifest[0].Config))) if err != nil { return err } defer df.Close() var details struct { RootFS struct { DiffIDs []string `json:"diff_ids"` } `json:"rootfs"` } if err = json.NewDecoder(df).Decode(&details); err != nil { return err } for l := range details.RootFS.DiffIDs { i.layerPaths[l] = filepath.Join(tmpDir, manifest[0].Layers[l]) } for l := range i.layerPaths { if i.layerPaths[l] == "" { return errors.New("failed to download all base layers from daemon") } } return nil } func addTextToTar(tw *tar.Writer, name string, contents []byte) error { hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(contents))} if err := tw.WriteHeader(hdr); err != nil { return err } _, err := tw.Write(contents) return err } func addFileToTar(tw *tar.Writer, name string, contents *os.File) error { fi, err := contents.Stat() if err != nil { return err } hdr := &tar.Header{Name: name, Mode: 0644, Size: fi.Size()} if err := tw.WriteHeader(hdr); err != nil { return err } _, err = io.Copy(tw, contents) return err } func untar(r io.Reader, dest string) error { tr := tar.NewReader(r) for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive return nil } if err != nil { return err } path := filepath.Join(dest, hdr.Name) switch hdr.Typeflag { case tar.TypeDir: if err := os.MkdirAll(path, hdr.FileInfo().Mode()); err != nil { return err } case tar.TypeReg, tar.TypeRegA: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } fh, err := os.OpenFile(filepath.Clean(path), os.O_CREATE|os.O_WRONLY, hdr.FileInfo().Mode()) if err != nil { return err } if _, err := io.Copy(fh, tr); err != nil { fh.Close() return err } fh.Close() case tar.TypeSymlink: _, err := os.Stat(filepath.Dir(path)) if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { return err } } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } default: return fmt.Errorf("unknown file type in tar %d", hdr.Typeflag) } } } func inspectOptionalImage(docker client.CommonAPIClient, imageName string, platform imgutil.Platform) (types.ImageInspect, error) { var ( err error inspect types.ImageInspect ) if inspect, _, err = docker.ImageInspectWithRaw(context.Background(), imageName); err != nil { if client.IsErrNotFound(err) { return defaultInspect(platform), nil } return types.ImageInspect{}, errors.Wrapf(err, 
"verifying image %q", imageName) } return inspect, nil } func defaultInspect(platform imgutil.Platform) types.ImageInspect { return types.ImageInspect{ Os: platform.OS, Architecture: platform.Architecture, OsVersion: platform.OSVersion, Config: &container.Config{}, } } func defaultPlatform(dockerClient client.CommonAPIClient) (imgutil.Platform, error) { daemonInfo, err := dockerClient.Info(context.Background()) if err != nil { return imgutil.Platform{}, err } return imgutil.Platform{ OS: daemonInfo.OSType, Architecture: "amd64", }, nil } func v1Config(inspect types.ImageInspect) (v1.ConfigFile, error) { history := make([]v1.History, len(inspect.RootFS.Layers)) for i := range history { // zero history history[i] = v1.History{ Created: v1.Time{Time: imgutil.NormalizedDateTime}, } } diffIDs := make([]v1.Hash, len(inspect.RootFS.Layers)) for i, layer := range inspect.RootFS.Layers { hash, err := v1.NewHash(layer) if err != nil { return v1.ConfigFile{}, err } diffIDs[i] = hash } exposedPorts := make(map[string]struct{}, len(inspect.Config.ExposedPorts)) for key, val := range inspect.Config.ExposedPorts { exposedPorts[string(key)] = val } var config v1.Config if inspect.Config != nil { var healthcheck *v1.HealthConfig if inspect.Config.Healthcheck != nil { healthcheck = &v1.HealthConfig{ Test: inspect.Config.Healthcheck.Test, Interval: inspect.Config.Healthcheck.Interval, Timeout: inspect.Config.Healthcheck.Timeout, StartPeriod: inspect.Config.Healthcheck.StartPeriod, Retries: inspect.Config.Healthcheck.Retries, } } config = v1.Config{ AttachStderr: inspect.Config.AttachStderr, AttachStdin: inspect.Config.AttachStdin, AttachStdout: inspect.Config.AttachStdout, Cmd: inspect.Config.Cmd, Healthcheck: healthcheck, Domainname: inspect.Config.Domainname, Entrypoint: inspect.Config.Entrypoint, Env: inspect.Config.Env, Hostname: inspect.Config.Hostname, Image: inspect.Config.Image, Labels: inspect.Config.Labels, OnBuild: inspect.Config.OnBuild, OpenStdin: inspect.Config.OpenStdin, StdinOnce: inspect.Config.StdinOnce, Tty: inspect.Config.Tty, User: inspect.Config.User, Volumes: inspect.Config.Volumes, WorkingDir: inspect.Config.WorkingDir, ExposedPorts: exposedPorts, ArgsEscaped: inspect.Config.ArgsEscaped, NetworkDisabled: inspect.Config.NetworkDisabled, MacAddress: inspect.Config.MacAddress, StopSignal: inspect.Config.StopSignal, Shell: inspect.Config.Shell, } } return v1.ConfigFile{ Architecture: inspect.Architecture, Created: v1.Time{Time: imgutil.NormalizedDateTime}, History: history, OS: inspect.Os, OSVersion: inspect.OsVersion, RootFS: v1.RootFS{ Type: "layers", DiffIDs: diffIDs, }, Config: config, }, nil } func checkResponseError(r io.Reader) error { decoder := json.NewDecoder(r) var jsonMessage jsonmessage.JSONMessage if err := decoder.Decode(&jsonMessage); err != nil { return errors.Wrapf(err, "parsing daemon response") } if jsonMessage.Error != nil { return errors.Wrap(jsonMessage.Error, "embedded daemon response") } return nil } // ensureReaderClosed drains and closes and reader, returning the first error func ensureReaderClosed(r io.ReadCloser) error { _, err := io.Copy(ioutil.Discard, r) if closeErr := r.Close(); closeErr != nil && err == nil { err = closeErr } return err }
delete(i.inspect.Config.Labels, key) return nil } func (i *Image) SetEnv(key, val string) error {
random_line_split
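This second local.go record splits a line inside the label/env setters. As a usage note, the hedged sketch below walks the lifecycle the file implements: load a base image and a previous image, mutate config and layers in memory, then Save streams a tarball (config JSON, layer tars, manifest.json) to the daemon's ImageLoad and tags the result. Image names and the layer tarball path are hypothetical; the constructors and methods are the ones defined in the record.

package main

import (
	"log"

	"github.com/buildpacks/imgutil/local" // assumed import path for package local
	"github.com/docker/docker/client"
)

func main() {
	dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	// FromBaseImage copies config and layers from an existing daemon image;
	// WithPreviousImage makes that image's layers available to ReuseLayer.
	img, err := local.NewImage(
		"example.com/app:new", // hypothetical target name
		dockerClient,
		local.FromBaseImage("ubuntu:22.04"),            // hypothetical base image
		local.WithPreviousImage("example.com/app:old"), // hypothetical previous build
	)
	if err != nil {
		log.Fatal(err)
	}

	// Mutations only touch the in-memory inspect/layerPaths state until Save.
	if err := img.SetLabel("built.by", "example"); err != nil {
		log.Fatal(err)
	}
	if err := img.SetEnv("APP_ENV", "production"); err != nil {
		log.Fatal(err)
	}
	if err := img.AddLayer("/tmp/app-layer.tar"); err != nil { // hypothetical layer tarball
		log.Fatal(err)
	}

	// Save loads the assembled image into the daemon and tags it with every name given.
	if err := img.Save("example.com/app:latest"); err != nil {
		log.Fatal(err)
	}
}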
_profile.py
import inspect import json from contextlib import contextmanager from time import perf_counter from typing import Optional, Callable from ._backend import Backend, BACKENDS, _DEFAULT class BackendCall: def __init__(self, start: float, stop: float, backend: 'ProfilingBackend', function_name): self._start = start self._stop = stop self._backend = backend self._function_name = function_name self._args = {"Backend": backend.name} def __repr__(self): return f"{1000 * self._duration:.2f} ms {self._function_name}" def print(self, include_parents, depth, min_duration, code_col, code_len): if self._duration >= min_duration: print(f"{' ' * depth}{1000 * self._duration:.2f} ms {self._backend}.{self._function_name}") @property def _name(self): return repr(self) @property def _duration(self): return self._stop - self._start def trace_json_events(self, include_parents) -> list: backend_index = self._backend._index name = self._function_name return [ { 'name': name, 'ph': 'X', 'pid': 1, 'tid': backend_index+1, 'ts': int(round(self._start * 1000000)), 'dur': int(round((self._stop - self._start) * 1000000)), 'args': self._args } ] def call_count(self) -> int: return 1 def add_arg(self, key, value): assert key not in self._args self._args[key] = value class ExtCall: """ Function invocation that is not a Backend method but internally calls Backend methods. """ def __init__(self, parent: 'ExtCall' or None, name: str, level: int, function: str, code_context: list or None, file_name: str, line_number: int): """ Args: parent: Parent call. name: Name of this call, see `ExtCall.determine_name()`. level: Number of parent stack items including this one. """ self._parent = parent if parent is None: self._parents = () else: self._parents = parent._parents + (parent,) self._children = [] # BackendCalls and ExtCalls self._converted = False self._name = name self._level = level self._function = function self._code_context = code_context self._file_name = file_name self._line_number = line_number def common_call(self, stack: list): """ Returns the deepest ExtCall in the hierarchy of this call that contains `stack`. """ if self._parent is None: return self if len(stack) < self._level: return self._parent.common_call(stack) for i in range(self._level - 1): if self._parents[i+1]._function != stack[-1-i].function: return self._parents[i] return self def add(self, child): self._children.append(child) @staticmethod def determine_name(info): fun = info.function if 'self' in info.frame.f_locals: if fun == '__init__': return f"{type(info.frame.f_locals['self']).__name__}()" return f"{type(info.frame.f_locals['self']).__name__}.{fun}" if 'phi/math' in info.filename or 'phi\\math' in info.filename: return f"math.{fun}" else: return fun @property def _start(self): return self._children[0]._start @property def _stop(self): return self._children[-1]._stop @property def _duration(self): return sum(c._duration for c in self._children) def call_count(self) -> int: return sum(child.call_count() for child in self._children) def __repr__(self): if not self._converted: if self._parent is None: return "/" return f"{self._name} ({self._level})" else: context = self._code_context return f"sum {1000 * self._duration:.2f} ms {context}" def __len__(self): return len(self._children) def _empty_parent_count(self): for i, parent in enumerate(reversed(self._parents)): if len(parent._children) > 1: return i return len(self._parents) def _eff_parent_count(self): return len([p for p in self._parents if len(p._children) > 1]) def
(self): parent = self._parent while parent._parent is not None: if len(parent._children) > 1: return parent parent = parent._parent return parent def _calling_code(self, backtrack=0): if self._level > backtrack + 1: call: ExtCall = self._parents[-backtrack-1] return call._code_context[0].strip(), call._file_name, call._function, call._line_number else: return "", "", "", -1 def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50): if self._duration < min_duration: return if len(self._children) == 1 and isinstance(self._children[0], ExtCall): self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len) else: funcs = [par._name for par in include_parents] + [self._name] text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)" if self._level > len(include_parents)+1: code = self._calling_code(backtrack=len(include_parents))[0] if len(code) > code_len: code = code[:code_len-3] + "..." text += " " + "." * max(0, (code_col - len(text))) + " > " + code print(text) for child in self._children: child.print((), depth + 1, min_duration, code_col, code_len) def children_to_properties(self) -> dict: result = {} for child in self._children: name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}" while isinstance(child, ExtCall) and len(child) == 1: child = child._children[0] name += " -> " + child._name result[name] = child if isinstance(child, ExtCall): child.children_to_properties() # finalize for name, child in result.items(): setattr(self, name, child) self._converted = True return result def trace_json_events(self, include_parents=()) -> list: if len(self._children) == 1: return self._children[0].trace_json_events(include_parents + (self,)) else: name = ' -> '.join([par._name for par in include_parents] + [self._name]) eff_parent_count = self._eff_parent_count() calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count()) result = [ { 'name': name, 'ph': "X", # complete event 'pid': 0, 'tid': eff_parent_count, 'ts': int(self._start * 1000000), 'dur': int((self._stop - self._start) * 1000000), 'args': { "Calling code snippet": calling_code, "Called by": f"{calling_function}() in {calling_filename}, line {lineno}", "Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)", "Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)" } } ] for child in self._children: result.extend(child.trace_json_events(())) return result class Profile: """ Stores information about calls to backends and their timing. Profile may be created through `profile()` or `profile_function()`. Profiles can be printed or saved to disc. 
""" def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool): self._start = perf_counter() self._stop = None self._root = ExtCall(None, "", 0, "", "", "", -1) self._last_ext_call = self._root self._messages = [] self._trace = trace self._backend_calls = [] self._retime_index = -1 self._accumulating = False self._backends = backends self._subtract_trace_time = subtract_trace_time self._total_trace_time = 0 def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result): if self._retime_index >= 0: prev_call = self._backend_calls[self._retime_index] assert prev_call._function_name == backend_call._function_name if self._accumulating: prev_call._start += backend_call._start prev_call._stop += backend_call._stop else: prev_call._start = backend_call._start prev_call._stop = backend_call._stop self._retime_index = (self._retime_index + 1) % len(self._backend_calls) else: self._backend_calls.append(backend_call) args = {i: arg for i, arg in enumerate(args)} args.update(kwargs) backend_call.add_arg("Inputs", _format_values(args, backend_call._backend)) if isinstance(result, (tuple, list)): backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend)) else: backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend)) if self._trace: stack = inspect.stack()[2:] call = self._last_ext_call.common_call(stack) for i in range(call._level, len(stack)): stack_frame = stack[len(stack) - i - 1] name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else "" sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno) call.add(sub_call) call = sub_call call.add(backend_call) self._last_ext_call = call if self._subtract_trace_time: delta_trace_time = perf_counter() - backend_call._stop backend_call._start -= self._total_trace_time backend_call._stop -= self._total_trace_time self._total_trace_time += delta_trace_time def _finish(self): self._stop = perf_counter() self._children_to_properties() @property def duration(self) -> float: """ Total time passed from creation of the profile to the end of the last operation. """ return self._stop - self._start if self._stop is not None else None def print(self, min_duration=1e-3, code_col=80, code_len=50): """ Prints this profile to the console. Args: min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds) code_col: Formatting option for where the context code is printed. code_len: Formatting option for cropping the context code """ print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms") if self._messages: print("External profiling:") for message in self._messages: print(f" {message}") print() self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len) def save(self, json_file: str): """ Saves this profile to disc using the *trace event format* described at https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit This file can be viewed with external applications such as Google chrome. 
Args: json_file: filename """ data = [ {'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": {"name": "0 Python calls"}}, {'name': "process_name", 'ph': 'M', 'pid': 1, 'tid': 1, "args": {"name": "1 Operations"}}, ] + [ {'name': "thread_name", 'ph': 'M', 'pid': 1, 'tid': i + 1, "args": {"name": backend.name}} for i, backend in enumerate(self._backends) ] if self._trace: if len(self._root._children) > 0: data.extend(self._root.trace_json_events()) else: data.extend(sum([call.trace_json_events(()) for call in self._backend_calls], [])) with open(json_file, 'w') as file: json.dump(data, file) save_trace = save def _children_to_properties(self): children = self._root.children_to_properties() for name, child in children.items(): setattr(self, name, child) def add_external_message(self, message: str): """ Stores an external message in this profile. External messages are printed in `Profile.print()`. """ self._messages.append(message) @contextmanager def retime(self): """ To be used in `with` statements, `with prof.retime(): ...`. Updates this profile by running the same operations again but without tracing. This gives a much better indication of the true timing. The code within the `with` block must perform the same operations as the code that created this profile. *Warning:* Internal caching may reduce the number of operations after the first time a function is called. To prevent this, run the function before profiling it, see `warmup` in `profile_function()`. """ self._retime_index = 0 restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 @contextmanager def _accumulate_average(self, n): self._retime_index = 0 self._accumulating = True restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 for call in self._backend_calls: call._start /= n call._stop /= n self._accumulating = False def _format_values(values: dict, backend): def format_val(value): if isinstance(value, str): return f'"{value}"' if isinstance(value, (int, float, complex, bool)): return value if isinstance(value, (tuple, list)): return str([format_val(v) for v in value]) try: shape = backend.shape(value) dtype = backend.dtype(value) try: shape = (int(dim) if dim is not None else '?' 
for dim in shape) except Exception: pass return f"{tuple(shape)}, {dtype}" except BaseException: return str(value) lines = [f"{key}: {format_val(val)}" for key, val in values.items()] return "\n".join(lines) class ProfilingBackend: def __init__(self, prof: Profile, backend: Backend, index: int): self._backend = backend self._profile = prof self._index = index # non-profiling methods self.name = backend.name self.combine_types = backend.combine_types self.auto_cast = backend.auto_cast self.is_tensor = backend.is_tensor self.is_available = backend.is_available self.shape = backend.shape self.staticshape = backend.staticshape self.ndims = backend.ndims self.dtype = backend.dtype self.expand_dims = backend.expand_dims self.reshape = backend.reshape self.supports = backend.supports # TODO strided slice does not go through backend atm # profiling methods for item_name in dir(backend): item = getattr(backend, item_name) if callable(item) and not hasattr(self, item_name): def context(item=item, item_name=item_name, profiling_backend=self): def call_fun(*args, **kwargs): start = perf_counter() result = item(*args, **kwargs) stop = perf_counter() prof._add_call(BackendCall(start, stop, profiling_backend, item_name), args, kwargs, result) return result return call_fun setattr(self, item_name, context()) def call(self, f: Callable, *args, name=None): start = perf_counter() result = f(*args) self._backend.block_until_ready(result) stop = perf_counter() self._profile._add_call(BackendCall(start, stop, self, name), args, {}, result) return result def __repr__(self): return f"profile[{self._backend}]" def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) def __eq__(self, other): return other is self or other is self._backend def __hash__(self): return hash(self._backend) _PROFILE = [] @contextmanager def profile(backends=None, trace=True, subtract_trace_time=True, save: str or None = None) -> Profile: """ To be used in `with` statements, `with math.backend.profile() as prof: ...`. Creates a `Profile` for the code executed within the context by tracking calls to the `backends` and optionally tracing the call. Args: backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times save: (Optional) File path to save the profile to. This will call `Profile.save()`. Returns: Created `Profile` """ backends = BACKENDS if backends is None else backends prof = Profile(trace, backends, subtract_trace_time) restore_data = _start_profiling(prof, backends) try: yield prof finally: _stop_profiling(prof, *restore_data) if save is not None: prof.save(save) def profile_function(fun: Callable, args: tuple or list = (), kwargs: dict or None = None, backends=None, trace=True, subtract_trace_time=True, retime=True, warmup=1, call_count=1) -> Profile: """ Creates a `Profile` for the function `fun(*args, **kwargs)`. Args: fun: Function to be profiled. In case `retime=True`, this function must perform the same operations each time it is called. Use `warmup>0` to ensure that internal caching does not interfere with the operations. args: Arguments to be passed to `fun`. kwargs: Keyword arguments to be passed to `fun`. backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. 
If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times. Has no effect if `retime=True`. retime: If true, calls `fun` another time without tracing the calls and updates the profile. This gives a much better indication of the true timing. See `Profile.retime()`. warmup: Number of times to call `fun` before profiling it. call_count: How often to call the function (excluding retime and warmup). The times will be averaged over multiple runs if `call_count > 1`. Returns: Created `Profile` for `fun`. """ kwargs = kwargs if isinstance(kwargs, dict) else {} for _ in range(warmup): fun(*args, **kwargs) with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof: fun(*args, **kwargs) if retime: with prof.retime(): fun(*args, **kwargs) if call_count > 1: with prof._accumulate_average(call_count): for _ in range(call_count - 1): fun(*args, **kwargs) return prof def _start_profiling(prof: Profile, backends: tuple or list): _PROFILE.append(prof) original_default = _DEFAULT[-1] original_backends = tuple(BACKENDS) for i, backend in enumerate(backends): prof_backend = ProfilingBackend(prof, backend, i) BACKENDS[BACKENDS.index(backend)] = prof_backend if _DEFAULT[-1] == backend: _DEFAULT[-1] = prof_backend return original_backends, original_default def _stop_profiling(prof: Profile, original_backends, original_default): prof._finish() _PROFILE.pop(-1) BACKENDS.clear() BACKENDS.extend(original_backends) _DEFAULT[-1] = original_default def get_current_profile() -> Optional[Profile]: """ Returns the currently active `Profile` if one is active. Otherwise returns `None`. """ return _PROFILE[-1] if _PROFILE else None
_closest_non_trivial_parent
identifier_name
_profile.py
import inspect import json from contextlib import contextmanager from time import perf_counter from typing import Optional, Callable from ._backend import Backend, BACKENDS, _DEFAULT class BackendCall: def __init__(self, start: float, stop: float, backend: 'ProfilingBackend', function_name): self._start = start self._stop = stop self._backend = backend self._function_name = function_name self._args = {"Backend": backend.name} def __repr__(self): return f"{1000 * self._duration:.2f} ms {self._function_name}" def print(self, include_parents, depth, min_duration, code_col, code_len): if self._duration >= min_duration: print(f"{' ' * depth}{1000 * self._duration:.2f} ms {self._backend}.{self._function_name}") @property def _name(self): return repr(self) @property def _duration(self): return self._stop - self._start def trace_json_events(self, include_parents) -> list: backend_index = self._backend._index name = self._function_name return [ { 'name': name, 'ph': 'X', 'pid': 1, 'tid': backend_index+1, 'ts': int(round(self._start * 1000000)), 'dur': int(round((self._stop - self._start) * 1000000)), 'args': self._args } ] def call_count(self) -> int: return 1 def add_arg(self, key, value): assert key not in self._args self._args[key] = value class ExtCall: """ Function invocation that is not a Backend method but internally calls Backend methods. """ def __init__(self, parent: 'ExtCall' or None, name: str, level: int, function: str, code_context: list or None, file_name: str, line_number: int): """ Args: parent: Parent call. name: Name of this call, see `ExtCall.determine_name()`. level: Number of parent stack items including this one. """ self._parent = parent if parent is None: self._parents = () else: self._parents = parent._parents + (parent,) self._children = [] # BackendCalls and ExtCalls self._converted = False self._name = name self._level = level self._function = function self._code_context = code_context self._file_name = file_name self._line_number = line_number def common_call(self, stack: list): """ Returns the deepest ExtCall in the hierarchy of this call that contains `stack`. 
""" if self._parent is None: return self if len(stack) < self._level: return self._parent.common_call(stack) for i in range(self._level - 1): if self._parents[i+1]._function != stack[-1-i].function: return self._parents[i] return self def add(self, child): self._children.append(child) @staticmethod def determine_name(info): fun = info.function if 'self' in info.frame.f_locals: if fun == '__init__': return f"{type(info.frame.f_locals['self']).__name__}()" return f"{type(info.frame.f_locals['self']).__name__}.{fun}" if 'phi/math' in info.filename or 'phi\\math' in info.filename: return f"math.{fun}" else: return fun @property def _start(self): return self._children[0]._start @property def _stop(self): return self._children[-1]._stop @property def _duration(self): return sum(c._duration for c in self._children) def call_count(self) -> int: return sum(child.call_count() for child in self._children) def __repr__(self): if not self._converted: if self._parent is None: return "/" return f"{self._name} ({self._level})" else: context = self._code_context return f"sum {1000 * self._duration:.2f} ms {context}" def __len__(self): return len(self._children) def _empty_parent_count(self): for i, parent in enumerate(reversed(self._parents)): if len(parent._children) > 1: return i return len(self._parents) def _eff_parent_count(self): return len([p for p in self._parents if len(p._children) > 1]) def _closest_non_trivial_parent(self): parent = self._parent while parent._parent is not None: if len(parent._children) > 1: return parent parent = parent._parent return parent def _calling_code(self, backtrack=0): if self._level > backtrack + 1: call: ExtCall = self._parents[-backtrack-1] return call._code_context[0].strip(), call._file_name, call._function, call._line_number else: return "", "", "", -1 def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50): if self._duration < min_duration: return if len(self._children) == 1 and isinstance(self._children[0], ExtCall): self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len) else: funcs = [par._name for par in include_parents] + [self._name] text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)" if self._level > len(include_parents)+1: code = self._calling_code(backtrack=len(include_parents))[0] if len(code) > code_len: code = code[:code_len-3] + "..." text += " " + "." 
* max(0, (code_col - len(text))) + " > " + code print(text) for child in self._children: child.print((), depth + 1, min_duration, code_col, code_len) def children_to_properties(self) -> dict: result = {} for child in self._children: name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}" while isinstance(child, ExtCall) and len(child) == 1: child = child._children[0] name += " -> " + child._name result[name] = child if isinstance(child, ExtCall): child.children_to_properties() # finalize for name, child in result.items(): setattr(self, name, child) self._converted = True return result def trace_json_events(self, include_parents=()) -> list: if len(self._children) == 1: return self._children[0].trace_json_events(include_parents + (self,)) else: name = ' -> '.join([par._name for par in include_parents] + [self._name]) eff_parent_count = self._eff_parent_count() calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count()) result = [ { 'name': name, 'ph': "X", # complete event 'pid': 0, 'tid': eff_parent_count, 'ts': int(self._start * 1000000), 'dur': int((self._stop - self._start) * 1000000), 'args': { "Calling code snippet": calling_code, "Called by": f"{calling_function}() in {calling_filename}, line {lineno}", "Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)", "Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)" } } ] for child in self._children: result.extend(child.trace_json_events(())) return result class Profile:
def _format_values(values: dict, backend): def format_val(value): if isinstance(value, str): return f'"{value}"' if isinstance(value, (int, float, complex, bool)): return value if isinstance(value, (tuple, list)): return str([format_val(v) for v in value]) try: shape = backend.shape(value) dtype = backend.dtype(value) try: shape = (int(dim) if dim is not None else '?' for dim in shape) except Exception: pass return f"{tuple(shape)}, {dtype}" except BaseException: return str(value) lines = [f"{key}: {format_val(val)}" for key, val in values.items()] return "\n".join(lines) class ProfilingBackend: def __init__(self, prof: Profile, backend: Backend, index: int): self._backend = backend self._profile = prof self._index = index # non-profiling methods self.name = backend.name self.combine_types = backend.combine_types self.auto_cast = backend.auto_cast self.is_tensor = backend.is_tensor self.is_available = backend.is_available self.shape = backend.shape self.staticshape = backend.staticshape self.ndims = backend.ndims self.dtype = backend.dtype self.expand_dims = backend.expand_dims self.reshape = backend.reshape self.supports = backend.supports # TODO strided slice does not go through backend atm # profiling methods for item_name in dir(backend): item = getattr(backend, item_name) if callable(item) and not hasattr(self, item_name): def context(item=item, item_name=item_name, profiling_backend=self): def call_fun(*args, **kwargs): start = perf_counter() result = item(*args, **kwargs) stop = perf_counter() prof._add_call(BackendCall(start, stop, profiling_backend, item_name), args, kwargs, result) return result return call_fun setattr(self, item_name, context()) def call(self, f: Callable, *args, name=None): start = perf_counter() result = f(*args) self._backend.block_until_ready(result) stop = perf_counter() self._profile._add_call(BackendCall(start, stop, self, name), args, {}, result) return result def __repr__(self): return f"profile[{self._backend}]" def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) def __eq__(self, other): return other is self or other is self._backend def __hash__(self): return hash(self._backend) _PROFILE = [] @contextmanager def profile(backends=None, trace=True, subtract_trace_time=True, save: str or None = None) -> Profile: """ To be used in `with` statements, `with math.backend.profile() as prof: ...`. Creates a `Profile` for the code executed within the context by tracking calls to the `backends` and optionally tracing the call. Args: backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times save: (Optional) File path to save the profile to. This will call `Profile.save()`. Returns: Created `Profile` """ backends = BACKENDS if backends is None else backends prof = Profile(trace, backends, subtract_trace_time) restore_data = _start_profiling(prof, backends) try: yield prof finally: _stop_profiling(prof, *restore_data) if save is not None: prof.save(save) def profile_function(fun: Callable, args: tuple or list = (), kwargs: dict or None = None, backends=None, trace=True, subtract_trace_time=True, retime=True, warmup=1, call_count=1) -> Profile: """ Creates a `Profile` for the function `fun(*args, **kwargs)`. Args: fun: Function to be profiled. 
In case `retime=True`, this function must perform the same operations each time it is called. Use `warmup>0` to ensure that internal caching does not interfere with the operations. args: Arguments to be passed to `fun`. kwargs: Keyword arguments to be passed to `fun`. backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times. Has no effect if `retime=True`. retime: If true, calls `fun` another time without tracing the calls and updates the profile. This gives a much better indication of the true timing. See `Profile.retime()`. warmup: Number of times to call `fun` before profiling it. call_count: How often to call the function (excluding retime and warmup). The times will be averaged over multiple runs if `call_count > 1`. Returns: Created `Profile` for `fun`. """ kwargs = kwargs if isinstance(kwargs, dict) else {} for _ in range(warmup): fun(*args, **kwargs) with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof: fun(*args, **kwargs) if retime: with prof.retime(): fun(*args, **kwargs) if call_count > 1: with prof._accumulate_average(call_count): for _ in range(call_count - 1): fun(*args, **kwargs) return prof def _start_profiling(prof: Profile, backends: tuple or list): _PROFILE.append(prof) original_default = _DEFAULT[-1] original_backends = tuple(BACKENDS) for i, backend in enumerate(backends): prof_backend = ProfilingBackend(prof, backend, i) BACKENDS[BACKENDS.index(backend)] = prof_backend if _DEFAULT[-1] == backend: _DEFAULT[-1] = prof_backend return original_backends, original_default def _stop_profiling(prof: Profile, original_backends, original_default): prof._finish() _PROFILE.pop(-1) BACKENDS.clear() BACKENDS.extend(original_backends) _DEFAULT[-1] = original_default def get_current_profile() -> Optional[Profile]: """ Returns the currently active `Profile` if one is active. Otherwise returns `None`. """ return _PROFILE[-1] if _PROFILE else None
""" Stores information about calls to backends and their timing. Profile may be created through `profile()` or `profile_function()`. Profiles can be printed or saved to disc. """ def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool): self._start = perf_counter() self._stop = None self._root = ExtCall(None, "", 0, "", "", "", -1) self._last_ext_call = self._root self._messages = [] self._trace = trace self._backend_calls = [] self._retime_index = -1 self._accumulating = False self._backends = backends self._subtract_trace_time = subtract_trace_time self._total_trace_time = 0 def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result): if self._retime_index >= 0: prev_call = self._backend_calls[self._retime_index] assert prev_call._function_name == backend_call._function_name if self._accumulating: prev_call._start += backend_call._start prev_call._stop += backend_call._stop else: prev_call._start = backend_call._start prev_call._stop = backend_call._stop self._retime_index = (self._retime_index + 1) % len(self._backend_calls) else: self._backend_calls.append(backend_call) args = {i: arg for i, arg in enumerate(args)} args.update(kwargs) backend_call.add_arg("Inputs", _format_values(args, backend_call._backend)) if isinstance(result, (tuple, list)): backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend)) else: backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend)) if self._trace: stack = inspect.stack()[2:] call = self._last_ext_call.common_call(stack) for i in range(call._level, len(stack)): stack_frame = stack[len(stack) - i - 1] name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else "" sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno) call.add(sub_call) call = sub_call call.add(backend_call) self._last_ext_call = call if self._subtract_trace_time: delta_trace_time = perf_counter() - backend_call._stop backend_call._start -= self._total_trace_time backend_call._stop -= self._total_trace_time self._total_trace_time += delta_trace_time def _finish(self): self._stop = perf_counter() self._children_to_properties() @property def duration(self) -> float: """ Total time passed from creation of the profile to the end of the last operation. """ return self._stop - self._start if self._stop is not None else None def print(self, min_duration=1e-3, code_col=80, code_len=50): """ Prints this profile to the console. Args: min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds) code_col: Formatting option for where the context code is printed. code_len: Formatting option for cropping the context code """ print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms") if self._messages: print("External profiling:") for message in self._messages: print(f" {message}") print() self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len) def save(self, json_file: str): """ Saves this profile to disc using the *trace event format* described at https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit This file can be viewed with external applications such as Google chrome. 
Args: json_file: filename """ data = [ {'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": {"name": "0 Python calls"}}, {'name': "process_name", 'ph': 'M', 'pid': 1, 'tid': 1, "args": {"name": "1 Operations"}}, ] + [ {'name': "thread_name", 'ph': 'M', 'pid': 1, 'tid': i + 1, "args": {"name": backend.name}} for i, backend in enumerate(self._backends) ] if self._trace: if len(self._root._children) > 0: data.extend(self._root.trace_json_events()) else: data.extend(sum([call.trace_json_events(()) for call in self._backend_calls], [])) with open(json_file, 'w') as file: json.dump(data, file) save_trace = save def _children_to_properties(self): children = self._root.children_to_properties() for name, child in children.items(): setattr(self, name, child) def add_external_message(self, message: str): """ Stores an external message in this profile. External messages are printed in `Profile.print()`. """ self._messages.append(message) @contextmanager def retime(self): """ To be used in `with` statements, `with prof.retime(): ...`. Updates this profile by running the same operations again but without tracing. This gives a much better indication of the true timing. The code within the `with` block must perform the same operations as the code that created this profile. *Warning:* Internal caching may reduce the number of operations after the first time a function is called. To prevent this, run the function before profiling it, see `warmup` in `profile_function()`. """ self._retime_index = 0 restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 @contextmanager def _accumulate_average(self, n): self._retime_index = 0 self._accumulating = True restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 for call in self._backend_calls: call._start /= n call._stop /= n self._accumulating = False
identifier_body
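# A minimal, hedged usage sketch of the profiler API captured in the row above.
# Assumptions: the package is importable as phi.math.backend (inferred from the
# 'phi/math' filename check in ExtCall.determine_name), and the operations inside
# step() are placeholders for any code that routes through the registered backends.
from phi import math

def step():
    grid = math.ones(math.spatial(x=128, y=128))  # placeholder workload, assumed API
    return math.exp(grid)

# warmup avoids counting one-time caching, retime re-runs the calls without tracing
# for more accurate timings, call_count averages the timings over several runs.
prof = math.backend.profile_function(step, warmup=1, retime=True, call_count=4)
prof.print(min_duration=1e-4)   # console tree, grouped by calling function when trace=True
prof.save("step_trace.json")    # trace-event JSON, viewable e.g. in chrome://tracing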
_profile.py
import inspect import json from contextlib import contextmanager from time import perf_counter from typing import Optional, Callable from ._backend import Backend, BACKENDS, _DEFAULT class BackendCall: def __init__(self, start: float, stop: float, backend: 'ProfilingBackend', function_name): self._start = start self._stop = stop self._backend = backend self._function_name = function_name self._args = {"Backend": backend.name} def __repr__(self): return f"{1000 * self._duration:.2f} ms {self._function_name}" def print(self, include_parents, depth, min_duration, code_col, code_len): if self._duration >= min_duration: print(f"{' ' * depth}{1000 * self._duration:.2f} ms {self._backend}.{self._function_name}") @property def _name(self): return repr(self) @property def _duration(self): return self._stop - self._start def trace_json_events(self, include_parents) -> list: backend_index = self._backend._index name = self._function_name return [ { 'name': name, 'ph': 'X', 'pid': 1, 'tid': backend_index+1, 'ts': int(round(self._start * 1000000)), 'dur': int(round((self._stop - self._start) * 1000000)), 'args': self._args } ] def call_count(self) -> int: return 1 def add_arg(self, key, value): assert key not in self._args self._args[key] = value class ExtCall: """ Function invocation that is not a Backend method but internally calls Backend methods. """ def __init__(self, parent: 'ExtCall' or None, name: str, level: int, function: str, code_context: list or None, file_name: str, line_number: int): """ Args: parent: Parent call. name: Name of this call, see `ExtCall.determine_name()`. level: Number of parent stack items including this one. """ self._parent = parent if parent is None: self._parents = () else: self._parents = parent._parents + (parent,) self._children = [] # BackendCalls and ExtCalls self._converted = False self._name = name self._level = level self._function = function self._code_context = code_context self._file_name = file_name self._line_number = line_number def common_call(self, stack: list): """ Returns the deepest ExtCall in the hierarchy of this call that contains `stack`. 
""" if self._parent is None: return self if len(stack) < self._level: return self._parent.common_call(stack) for i in range(self._level - 1): if self._parents[i+1]._function != stack[-1-i].function: return self._parents[i] return self def add(self, child): self._children.append(child) @staticmethod def determine_name(info): fun = info.function if 'self' in info.frame.f_locals: if fun == '__init__': return f"{type(info.frame.f_locals['self']).__name__}()" return f"{type(info.frame.f_locals['self']).__name__}.{fun}" if 'phi/math' in info.filename or 'phi\\math' in info.filename: return f"math.{fun}" else: return fun @property def _start(self): return self._children[0]._start @property def _stop(self): return self._children[-1]._stop @property def _duration(self): return sum(c._duration for c in self._children) def call_count(self) -> int: return sum(child.call_count() for child in self._children) def __repr__(self): if not self._converted: if self._parent is None: return "/" return f"{self._name} ({self._level})" else: context = self._code_context return f"sum {1000 * self._duration:.2f} ms {context}" def __len__(self): return len(self._children) def _empty_parent_count(self): for i, parent in enumerate(reversed(self._parents)): if len(parent._children) > 1: return i return len(self._parents) def _eff_parent_count(self): return len([p for p in self._parents if len(p._children) > 1]) def _closest_non_trivial_parent(self): parent = self._parent while parent._parent is not None: if len(parent._children) > 1: return parent parent = parent._parent return parent def _calling_code(self, backtrack=0): if self._level > backtrack + 1: call: ExtCall = self._parents[-backtrack-1] return call._code_context[0].strip(), call._file_name, call._function, call._line_number else: return "", "", "", -1 def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50): if self._duration < min_duration: return if len(self._children) == 1 and isinstance(self._children[0], ExtCall): self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len) else: funcs = [par._name for par in include_parents] + [self._name] text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)" if self._level > len(include_parents)+1: code = self._calling_code(backtrack=len(include_parents))[0] if len(code) > code_len: code = code[:code_len-3] + "..." text += " " + "." 
* max(0, (code_col - len(text))) + " > " + code print(text) for child in self._children: child.print((), depth + 1, min_duration, code_col, code_len) def children_to_properties(self) -> dict: result = {} for child in self._children: name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}" while isinstance(child, ExtCall) and len(child) == 1: child = child._children[0] name += " -> " + child._name result[name] = child if isinstance(child, ExtCall): child.children_to_properties() # finalize for name, child in result.items(): setattr(self, name, child) self._converted = True return result def trace_json_events(self, include_parents=()) -> list: if len(self._children) == 1: return self._children[0].trace_json_events(include_parents + (self,)) else: name = ' -> '.join([par._name for par in include_parents] + [self._name]) eff_parent_count = self._eff_parent_count() calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count()) result = [ { 'name': name, 'ph': "X", # complete event 'pid': 0, 'tid': eff_parent_count, 'ts': int(self._start * 1000000), 'dur': int((self._stop - self._start) * 1000000), 'args': { "Calling code snippet": calling_code, "Called by": f"{calling_function}() in {calling_filename}, line {lineno}", "Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)", "Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)" } } ] for child in self._children: result.extend(child.trace_json_events(())) return result class Profile: """ Stores information about calls to backends and their timing. Profile may be created through `profile()` or `profile_function()`. Profiles can be printed or saved to disc. 
""" def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool): self._start = perf_counter() self._stop = None self._root = ExtCall(None, "", 0, "", "", "", -1) self._last_ext_call = self._root self._messages = [] self._trace = trace self._backend_calls = [] self._retime_index = -1 self._accumulating = False self._backends = backends self._subtract_trace_time = subtract_trace_time self._total_trace_time = 0 def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result): if self._retime_index >= 0: prev_call = self._backend_calls[self._retime_index] assert prev_call._function_name == backend_call._function_name if self._accumulating: prev_call._start += backend_call._start prev_call._stop += backend_call._stop else: prev_call._start = backend_call._start prev_call._stop = backend_call._stop self._retime_index = (self._retime_index + 1) % len(self._backend_calls) else: self._backend_calls.append(backend_call) args = {i: arg for i, arg in enumerate(args)} args.update(kwargs) backend_call.add_arg("Inputs", _format_values(args, backend_call._backend)) if isinstance(result, (tuple, list)): backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend)) else: backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend)) if self._trace: stack = inspect.stack()[2:] call = self._last_ext_call.common_call(stack) for i in range(call._level, len(stack)): stack_frame = stack[len(stack) - i - 1] name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else "" sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno) call.add(sub_call) call = sub_call call.add(backend_call) self._last_ext_call = call if self._subtract_trace_time: delta_trace_time = perf_counter() - backend_call._stop backend_call._start -= self._total_trace_time backend_call._stop -= self._total_trace_time self._total_trace_time += delta_trace_time def _finish(self):
self._stop = perf_counter() self._children_to_properties() @property def duration(self) -> float: """ Total time passed from creation of the profile to the end of the last operation. """ return self._stop - self._start if self._stop is not None else None def print(self, min_duration=1e-3, code_col=80, code_len=50): """ Prints this profile to the console. Args: min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds) code_col: Formatting option for where the context code is printed. code_len: Formatting option for cropping the context code """ print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms") if self._messages: print("External profiling:") for message in self._messages: print(f" {message}") print() self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len) def save(self, json_file: str): """ Saves this profile to disc using the *trace event format* described at https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit This file can be viewed with external applications such as Google chrome. Args: json_file: filename """ data = [ {'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": {"name": "0 Python calls"}}, {'name': "process_name", 'ph': 'M', 'pid': 1, 'tid': 1, "args": {"name": "1 Operations"}}, ] + [ {'name': "thread_name", 'ph': 'M', 'pid': 1, 'tid': i + 1, "args": {"name": backend.name}} for i, backend in enumerate(self._backends) ] if self._trace: if len(self._root._children) > 0: data.extend(self._root.trace_json_events()) else: data.extend(sum([call.trace_json_events(()) for call in self._backend_calls], [])) with open(json_file, 'w') as file: json.dump(data, file) save_trace = save def _children_to_properties(self): children = self._root.children_to_properties() for name, child in children.items(): setattr(self, name, child) def add_external_message(self, message: str): """ Stores an external message in this profile. External messages are printed in `Profile.print()`. """ self._messages.append(message) @contextmanager def retime(self): """ To be used in `with` statements, `with prof.retime(): ...`. Updates this profile by running the same operations again but without tracing. This gives a much better indication of the true timing. The code within the `with` block must perform the same operations as the code that created this profile. *Warning:* Internal caching may reduce the number of operations after the first time a function is called. To prevent this, run the function before profiling it, see `warmup` in `profile_function()`. 
""" self._retime_index = 0 restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 @contextmanager def _accumulate_average(self, n): self._retime_index = 0 self._accumulating = True restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 for call in self._backend_calls: call._start /= n call._stop /= n self._accumulating = False def _format_values(values: dict, backend): def format_val(value): if isinstance(value, str): return f'"{value}"' if isinstance(value, (int, float, complex, bool)): return value if isinstance(value, (tuple, list)): return str([format_val(v) for v in value]) try: shape = backend.shape(value) dtype = backend.dtype(value) try: shape = (int(dim) if dim is not None else '?' for dim in shape) except Exception: pass return f"{tuple(shape)}, {dtype}" except BaseException: return str(value) lines = [f"{key}: {format_val(val)}" for key, val in values.items()] return "\n".join(lines) class ProfilingBackend: def __init__(self, prof: Profile, backend: Backend, index: int): self._backend = backend self._profile = prof self._index = index # non-profiling methods self.name = backend.name self.combine_types = backend.combine_types self.auto_cast = backend.auto_cast self.is_tensor = backend.is_tensor self.is_available = backend.is_available self.shape = backend.shape self.staticshape = backend.staticshape self.ndims = backend.ndims self.dtype = backend.dtype self.expand_dims = backend.expand_dims self.reshape = backend.reshape self.supports = backend.supports # TODO strided slice does not go through backend atm # profiling methods for item_name in dir(backend): item = getattr(backend, item_name) if callable(item) and not hasattr(self, item_name): def context(item=item, item_name=item_name, profiling_backend=self): def call_fun(*args, **kwargs): start = perf_counter() result = item(*args, **kwargs) stop = perf_counter() prof._add_call(BackendCall(start, stop, profiling_backend, item_name), args, kwargs, result) return result return call_fun setattr(self, item_name, context()) def call(self, f: Callable, *args, name=None): start = perf_counter() result = f(*args) self._backend.block_until_ready(result) stop = perf_counter() self._profile._add_call(BackendCall(start, stop, self, name), args, {}, result) return result def __repr__(self): return f"profile[{self._backend}]" def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) def __eq__(self, other): return other is self or other is self._backend def __hash__(self): return hash(self._backend) _PROFILE = [] @contextmanager def profile(backends=None, trace=True, subtract_trace_time=True, save: str or None = None) -> Profile: """ To be used in `with` statements, `with math.backend.profile() as prof: ...`. Creates a `Profile` for the code executed within the context by tracking calls to the `backends` and optionally tracing the call. Args: backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. 
If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times save: (Optional) File path to save the profile to. This will call `Profile.save()`. Returns: Created `Profile` """ backends = BACKENDS if backends is None else backends prof = Profile(trace, backends, subtract_trace_time) restore_data = _start_profiling(prof, backends) try: yield prof finally: _stop_profiling(prof, *restore_data) if save is not None: prof.save(save) def profile_function(fun: Callable, args: tuple or list = (), kwargs: dict or None = None, backends=None, trace=True, subtract_trace_time=True, retime=True, warmup=1, call_count=1) -> Profile: """ Creates a `Profile` for the function `fun(*args, **kwargs)`. Args: fun: Function to be profiled. In case `retime=True`, this function must perform the same operations each time it is called. Use `warmup>0` to ensure that internal caching does not interfere with the operations. args: Arguments to be passed to `fun`. kwargs: Keyword arguments to be passed to `fun`. backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times. Has no effect if `retime=True`. retime: If true, calls `fun` another time without tracing the calls and updates the profile. This gives a much better indication of the true timing. See `Profile.retime()`. warmup: Number of times to call `fun` before profiling it. call_count: How often to call the function (excluding retime and warmup). The times will be averaged over multiple runs if `call_count > 1`. Returns: Created `Profile` for `fun`. """ kwargs = kwargs if isinstance(kwargs, dict) else {} for _ in range(warmup): fun(*args, **kwargs) with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof: fun(*args, **kwargs) if retime: with prof.retime(): fun(*args, **kwargs) if call_count > 1: with prof._accumulate_average(call_count): for _ in range(call_count - 1): fun(*args, **kwargs) return prof def _start_profiling(prof: Profile, backends: tuple or list): _PROFILE.append(prof) original_default = _DEFAULT[-1] original_backends = tuple(BACKENDS) for i, backend in enumerate(backends): prof_backend = ProfilingBackend(prof, backend, i) BACKENDS[BACKENDS.index(backend)] = prof_backend if _DEFAULT[-1] == backend: _DEFAULT[-1] = prof_backend return original_backends, original_default def _stop_profiling(prof: Profile, original_backends, original_default): prof._finish() _PROFILE.pop(-1) BACKENDS.clear() BACKENDS.extend(original_backends) _DEFAULT[-1] = original_default def get_current_profile() -> Optional[Profile]: """ Returns the currently active `Profile` if one is active. Otherwise returns `None`. """ return _PROFILE[-1] if _PROFILE else None
random_line_split
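# For orientation, a rough sketch (written as a Python literal) of the JSON that
# Profile.save() in the row above assembles in the Chrome trace-event format.
# Event names, timings and the 'numpy' backend are invented for illustration, the
# 'args' dicts are abridged; ts/dur are microseconds and 'X' marks a complete event.
example_trace = [
    # metadata events emitted first
    {"name": "process_name", "ph": "M", "pid": 0, "tid": 0, "args": {"name": "0 Python calls"}},
    {"name": "process_name", "ph": "M", "pid": 1, "tid": 1, "args": {"name": "1 Operations"}},
    {"name": "thread_name",  "ph": "M", "pid": 1, "tid": 1, "args": {"name": "numpy"}},  # one per backend
    # one 'X' event per grouped Python call (pid 0) when trace=True ...
    {"name": "step -> math.solve", "ph": "X", "pid": 0, "tid": 0, "ts": 1200, "dur": 8400,
     "args": {"Backend calls": "12 (80% of parent)"}},
    # ... and one per backend call (pid 1, tid = backend index + 1)
    {"name": "matmul", "ph": "X", "pid": 1, "tid": 1, "ts": 1450, "dur": 900,
     "args": {"Backend": "numpy"}},
]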
_profile.py
import inspect import json from contextlib import contextmanager from time import perf_counter from typing import Optional, Callable from ._backend import Backend, BACKENDS, _DEFAULT class BackendCall: def __init__(self, start: float, stop: float, backend: 'ProfilingBackend', function_name): self._start = start self._stop = stop self._backend = backend self._function_name = function_name self._args = {"Backend": backend.name} def __repr__(self): return f"{1000 * self._duration:.2f} ms {self._function_name}" def print(self, include_parents, depth, min_duration, code_col, code_len): if self._duration >= min_duration: print(f"{' ' * depth}{1000 * self._duration:.2f} ms {self._backend}.{self._function_name}") @property def _name(self): return repr(self) @property def _duration(self): return self._stop - self._start def trace_json_events(self, include_parents) -> list: backend_index = self._backend._index name = self._function_name return [ { 'name': name, 'ph': 'X', 'pid': 1, 'tid': backend_index+1, 'ts': int(round(self._start * 1000000)), 'dur': int(round((self._stop - self._start) * 1000000)), 'args': self._args } ] def call_count(self) -> int: return 1 def add_arg(self, key, value): assert key not in self._args self._args[key] = value class ExtCall: """ Function invocation that is not a Backend method but internally calls Backend methods. """ def __init__(self, parent: 'ExtCall' or None, name: str, level: int, function: str, code_context: list or None, file_name: str, line_number: int): """ Args: parent: Parent call. name: Name of this call, see `ExtCall.determine_name()`. level: Number of parent stack items including this one. """ self._parent = parent if parent is None: self._parents = () else: self._parents = parent._parents + (parent,) self._children = [] # BackendCalls and ExtCalls self._converted = False self._name = name self._level = level self._function = function self._code_context = code_context self._file_name = file_name self._line_number = line_number def common_call(self, stack: list): """ Returns the deepest ExtCall in the hierarchy of this call that contains `stack`. 
""" if self._parent is None: return self if len(stack) < self._level: return self._parent.common_call(stack) for i in range(self._level - 1): if self._parents[i+1]._function != stack[-1-i].function: return self._parents[i] return self def add(self, child): self._children.append(child) @staticmethod def determine_name(info): fun = info.function if 'self' in info.frame.f_locals: if fun == '__init__': return f"{type(info.frame.f_locals['self']).__name__}()" return f"{type(info.frame.f_locals['self']).__name__}.{fun}" if 'phi/math' in info.filename or 'phi\\math' in info.filename: return f"math.{fun}" else: return fun @property def _start(self): return self._children[0]._start @property def _stop(self): return self._children[-1]._stop @property def _duration(self): return sum(c._duration for c in self._children) def call_count(self) -> int: return sum(child.call_count() for child in self._children) def __repr__(self): if not self._converted: if self._parent is None: return "/" return f"{self._name} ({self._level})" else: context = self._code_context return f"sum {1000 * self._duration:.2f} ms {context}" def __len__(self): return len(self._children) def _empty_parent_count(self): for i, parent in enumerate(reversed(self._parents)): if len(parent._children) > 1: return i return len(self._parents) def _eff_parent_count(self): return len([p for p in self._parents if len(p._children) > 1]) def _closest_non_trivial_parent(self): parent = self._parent while parent._parent is not None: if len(parent._children) > 1: return parent parent = parent._parent return parent def _calling_code(self, backtrack=0): if self._level > backtrack + 1: call: ExtCall = self._parents[-backtrack-1] return call._code_context[0].strip(), call._file_name, call._function, call._line_number else: return "", "", "", -1 def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50): if self._duration < min_duration: return if len(self._children) == 1 and isinstance(self._children[0], ExtCall): self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len) else: funcs = [par._name for par in include_parents] + [self._name] text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)" if self._level > len(include_parents)+1: code = self._calling_code(backtrack=len(include_parents))[0] if len(code) > code_len: code = code[:code_len-3] + "..." text += " " + "." 
* max(0, (code_col - len(text))) + " > " + code print(text) for child in self._children: child.print((), depth + 1, min_duration, code_col, code_len) def children_to_properties(self) -> dict: result = {} for child in self._children: name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}" while isinstance(child, ExtCall) and len(child) == 1: child = child._children[0] name += " -> " + child._name result[name] = child if isinstance(child, ExtCall): child.children_to_properties() # finalize for name, child in result.items(): setattr(self, name, child) self._converted = True return result def trace_json_events(self, include_parents=()) -> list: if len(self._children) == 1: return self._children[0].trace_json_events(include_parents + (self,)) else: name = ' -> '.join([par._name for par in include_parents] + [self._name]) eff_parent_count = self._eff_parent_count() calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count()) result = [ { 'name': name, 'ph': "X", # complete event 'pid': 0, 'tid': eff_parent_count, 'ts': int(self._start * 1000000), 'dur': int((self._stop - self._start) * 1000000), 'args': { "Calling code snippet": calling_code, "Called by": f"{calling_function}() in {calling_filename}, line {lineno}", "Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)", "Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)" } } ] for child in self._children: result.extend(child.trace_json_events(())) return result class Profile: """ Stores information about calls to backends and their timing. Profile may be created through `profile()` or `profile_function()`. Profiles can be printed or saved to disc. """ def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool): self._start = perf_counter() self._stop = None self._root = ExtCall(None, "", 0, "", "", "", -1) self._last_ext_call = self._root self._messages = [] self._trace = trace self._backend_calls = [] self._retime_index = -1 self._accumulating = False self._backends = backends self._subtract_trace_time = subtract_trace_time self._total_trace_time = 0 def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result): if self._retime_index >= 0: prev_call = self._backend_calls[self._retime_index] assert prev_call._function_name == backend_call._function_name if self._accumulating: prev_call._start += backend_call._start prev_call._stop += backend_call._stop else: prev_call._start = backend_call._start prev_call._stop = backend_call._stop self._retime_index = (self._retime_index + 1) % len(self._backend_calls) else: self._backend_calls.append(backend_call) args = {i: arg for i, arg in enumerate(args)} args.update(kwargs) backend_call.add_arg("Inputs", _format_values(args, backend_call._backend)) if isinstance(result, (tuple, list)): backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend)) else:
if self._trace: stack = inspect.stack()[2:] call = self._last_ext_call.common_call(stack) for i in range(call._level, len(stack)): stack_frame = stack[len(stack) - i - 1] name = ExtCall.determine_name(stack_frame) # if len(stack) - i > 1 else "" sub_call = ExtCall(call, name, i + 1, stack_frame.function, stack_frame.code_context, stack_frame.filename, stack_frame.lineno) call.add(sub_call) call = sub_call call.add(backend_call) self._last_ext_call = call if self._subtract_trace_time: delta_trace_time = perf_counter() - backend_call._stop backend_call._start -= self._total_trace_time backend_call._stop -= self._total_trace_time self._total_trace_time += delta_trace_time def _finish(self): self._stop = perf_counter() self._children_to_properties() @property def duration(self) -> float: """ Total time passed from creation of the profile to the end of the last operation. """ return self._stop - self._start if self._stop is not None else None def print(self, min_duration=1e-3, code_col=80, code_len=50): """ Prints this profile to the console. Args: min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds) code_col: Formatting option for where the context code is printed. code_len: Formatting option for cropping the context code """ print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms") if self._messages: print("External profiling:") for message in self._messages: print(f" {message}") print() self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len) def save(self, json_file: str): """ Saves this profile to disc using the *trace event format* described at https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit This file can be viewed with external applications such as Google chrome. Args: json_file: filename """ data = [ {'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": {"name": "0 Python calls"}}, {'name': "process_name", 'ph': 'M', 'pid': 1, 'tid': 1, "args": {"name": "1 Operations"}}, ] + [ {'name': "thread_name", 'ph': 'M', 'pid': 1, 'tid': i + 1, "args": {"name": backend.name}} for i, backend in enumerate(self._backends) ] if self._trace: if len(self._root._children) > 0: data.extend(self._root.trace_json_events()) else: data.extend(sum([call.trace_json_events(()) for call in self._backend_calls], [])) with open(json_file, 'w') as file: json.dump(data, file) save_trace = save def _children_to_properties(self): children = self._root.children_to_properties() for name, child in children.items(): setattr(self, name, child) def add_external_message(self, message: str): """ Stores an external message in this profile. External messages are printed in `Profile.print()`. """ self._messages.append(message) @contextmanager def retime(self): """ To be used in `with` statements, `with prof.retime(): ...`. Updates this profile by running the same operations again but without tracing. This gives a much better indication of the true timing. The code within the `with` block must perform the same operations as the code that created this profile. *Warning:* Internal caching may reduce the number of operations after the first time a function is called. To prevent this, run the function before profiling it, see `warmup` in `profile_function()`. 
""" self._retime_index = 0 restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 @contextmanager def _accumulate_average(self, n): self._retime_index = 0 self._accumulating = True restore_data = _start_profiling(self, self._backends) try: yield None finally: _stop_profiling(self, *restore_data) assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, " self._retime_index = -1 for call in self._backend_calls: call._start /= n call._stop /= n self._accumulating = False def _format_values(values: dict, backend): def format_val(value): if isinstance(value, str): return f'"{value}"' if isinstance(value, (int, float, complex, bool)): return value if isinstance(value, (tuple, list)): return str([format_val(v) for v in value]) try: shape = backend.shape(value) dtype = backend.dtype(value) try: shape = (int(dim) if dim is not None else '?' for dim in shape) except Exception: pass return f"{tuple(shape)}, {dtype}" except BaseException: return str(value) lines = [f"{key}: {format_val(val)}" for key, val in values.items()] return "\n".join(lines) class ProfilingBackend: def __init__(self, prof: Profile, backend: Backend, index: int): self._backend = backend self._profile = prof self._index = index # non-profiling methods self.name = backend.name self.combine_types = backend.combine_types self.auto_cast = backend.auto_cast self.is_tensor = backend.is_tensor self.is_available = backend.is_available self.shape = backend.shape self.staticshape = backend.staticshape self.ndims = backend.ndims self.dtype = backend.dtype self.expand_dims = backend.expand_dims self.reshape = backend.reshape self.supports = backend.supports # TODO strided slice does not go through backend atm # profiling methods for item_name in dir(backend): item = getattr(backend, item_name) if callable(item) and not hasattr(self, item_name): def context(item=item, item_name=item_name, profiling_backend=self): def call_fun(*args, **kwargs): start = perf_counter() result = item(*args, **kwargs) stop = perf_counter() prof._add_call(BackendCall(start, stop, profiling_backend, item_name), args, kwargs, result) return result return call_fun setattr(self, item_name, context()) def call(self, f: Callable, *args, name=None): start = perf_counter() result = f(*args) self._backend.block_until_ready(result) stop = perf_counter() self._profile._add_call(BackendCall(start, stop, self, name), args, {}, result) return result def __repr__(self): return f"profile[{self._backend}]" def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) def __eq__(self, other): return other is self or other is self._backend def __hash__(self): return hash(self._backend) _PROFILE = [] @contextmanager def profile(backends=None, trace=True, subtract_trace_time=True, save: str or None = None) -> Profile: """ To be used in `with` statements, `with math.backend.profile() as prof: ...`. Creates a `Profile` for the code executed within the context by tracking calls to the `backends` and optionally tracing the call. Args: backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. 
If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times save: (Optional) File path to save the profile to. This will call `Profile.save()`. Returns: Created `Profile` """ backends = BACKENDS if backends is None else backends prof = Profile(trace, backends, subtract_trace_time) restore_data = _start_profiling(prof, backends) try: yield prof finally: _stop_profiling(prof, *restore_data) if save is not None: prof.save(save) def profile_function(fun: Callable, args: tuple or list = (), kwargs: dict or None = None, backends=None, trace=True, subtract_trace_time=True, retime=True, warmup=1, call_count=1) -> Profile: """ Creates a `Profile` for the function `fun(*args, **kwargs)`. Args: fun: Function to be profiled. In case `retime=True`, this function must perform the same operations each time it is called. Use `warmup>0` to ensure that internal caching does not interfere with the operations. args: Arguments to be passed to `fun`. kwargs: Keyword arguments to be passed to `fun`. backends: List of backends to profile, `None` to profile all. trace: Whether to perform a full stack trace for each backend call. If true, groups backend calls by function. subtract_trace_time: If True, subtracts the time it took to trace the call stack from the event times. Has no effect if `retime=True`. retime: If true, calls `fun` another time without tracing the calls and updates the profile. This gives a much better indication of the true timing. See `Profile.retime()`. warmup: Number of times to call `fun` before profiling it. call_count: How often to call the function (excluding retime and warmup). The times will be averaged over multiple runs if `call_count > 1`. Returns: Created `Profile` for `fun`. """ kwargs = kwargs if isinstance(kwargs, dict) else {} for _ in range(warmup): fun(*args, **kwargs) with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof: fun(*args, **kwargs) if retime: with prof.retime(): fun(*args, **kwargs) if call_count > 1: with prof._accumulate_average(call_count): for _ in range(call_count - 1): fun(*args, **kwargs) return prof def _start_profiling(prof: Profile, backends: tuple or list): _PROFILE.append(prof) original_default = _DEFAULT[-1] original_backends = tuple(BACKENDS) for i, backend in enumerate(backends): prof_backend = ProfilingBackend(prof, backend, i) BACKENDS[BACKENDS.index(backend)] = prof_backend if _DEFAULT[-1] == backend: _DEFAULT[-1] = prof_backend return original_backends, original_default def _stop_profiling(prof: Profile, original_backends, original_default): prof._finish() _PROFILE.pop(-1) BACKENDS.clear() BACKENDS.extend(original_backends) _DEFAULT[-1] = original_default def get_current_profile() -> Optional[Profile]: """ Returns the currently active `Profile` if one is active. Otherwise returns `None`. """ return _PROFILE[-1] if _PROFILE else None
backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend))
conditional_block
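# The 'Inputs'/'Outputs' strings attached to each BackendCall above are produced by
# _format_values. A small, hedged illustration with a mock backend standing in for the
# real Backend (which supplies shape() and dtype()); the import path is an assumption,
# and _format_values is module-private, imported here only to demonstrate its output.
import numpy as np
from phi.math.backend._profile import _format_values

class MockBackend:
    def shape(self, v): return np.shape(v)
    def dtype(self, v): return np.asarray(v).dtype

print(_format_values({"x": np.zeros((4, 3)), "flag": True, "name": "demo"}, MockBackend()))
# Expected output, one "key: value" line per entry:
# x: (4, 3), float64
# flag: True
# name: "demo"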
wps.go
// SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved. // // This software is provided under under a slightly modified version // of the Apache Software License. See the accompanying LICENSE file // for more information. // // Description: // WPS packets // // Author: // Aureliano Calvo import array import struct from impacket.helper import ProtocolPacket, Byte, Bit from functools import reduce type ArrayBuilder struct { // object: func (self TYPE) from_ary(ary interface{}){
type ByteBuilder struct { // object: func (self TYPE) from_ary(ary interface{}){ return ary[0] func (self TYPE) to_ary(value interface{}){ return array.array('B', [value]) type StringBuilder struct { // object: func (self TYPE) from_ary(ary interface{}){ return ary.tostring() func (self TYPE) to_ary(value interface{}){ return array.array('B', value) type NumBuilder struct { // object: """Converts back and forth between arrays and numbers in network byte-order""" func (self TYPE) __init__(size interface{}){ """size: number of bytes in the field""" self.size = size func (self TYPE) from_ary(ary interface{}){ if len(ary) != self.size { raise Exception("Expected %s size but got %s" % (self.size, len(ary))) return reduce( lambda ac, x: ac * 256 + x, ary, 0) func (self TYPE) to_ary(value0 interface{}){ value = value0 rv = array.array("B") for _ in range(self.size): value, mod = divmod(value, 256) rv.append(mod) if value != 0 { raise Exception("%s is too big. Max size: %s" % (value0, self.size)) rv.reverse() return rv type TLVContainer struct { // object: func (self TYPE) builder(kind interface{}){ return self.builders.get(kind, self.default_builder) func (self TYPE) from_ary(ary interface{}){ i = 0 while i<len(ary): kind = self.ary2n(ary, i) length = self.ary2n(ary, i+2) i+=4 value = ary[i:i+length] self.elems.append((kind, value)) i += length return self func (self TYPE) __init__(builders, default_builder = ArrayBuilder(), descs=nil interface{}){ self.builders = builders self.default_builder = default_builder self.elems = [] self.descs = descs or {} func (self TYPE) append(kind, value interface{}){ self.elems.append((kind, self.builder(kind).to_ary(value))) func (self TYPE) __iter__(){ return ((k, self.builder(k).from_ary(v)) for k,v in self.elems) func (self TYPE) all(kind interface{}){ return [e[1] for e in self if e[0] == kind] func (self TYPE) __contains__(kind interface{}){ return len(self.all(kind)) != 0 func (self TYPE) first(kind interface{}){ return self.all(kind)[0] func (self TYPE) to_ary(){ ary = array.array("B") for k,v in self.elems: ary.extend(self.n2ary(k)) ary.extend(self.n2ary(len(v))) ary.extend(v) return ary func (self TYPE) get_packet(){ return self.to_ary().tostring() func (self TYPE) set_parent(my_parent interface{}){ self.__parent = my_parent func (self TYPE) parent(){ return self.__parent func (self TYPE) n2ary(n interface{}){ return array.array("B", struct.pack(">H",n)) func (self TYPE) ary2n(ary, i=0 interface{}){ return struct.unpack(">H", ary[i:i+2].tostring())[0] func (self TYPE) __repr__(){ func desc(kind interface{}){ return self.descs[kind] if kind in self.descs else kind return "<TLVContainer %s>" % repr([(desc(k), self.builder(k).from_ary(v)) for (k,v) in self.elems]) func (self TYPE) child(){ return nil type SCElem struct { // object: //Data elements as defined in section 11 of the WPS 1.0h spec. 
AP_CHANNEL = 0x1001 ASSOCIATION_STATE = 0x1002 AUTHENTICATION_TYPE = 0x1003 AUTHENTICATION_TYPE_FLAGS = 0x1004 AUTHENTICATOR = 0x1005 CONFIG_METHODS = 0x1008 CONFIGURATION_ERROR = 0x1009 CONFIRMATION_URL4 = 0x100A CONFIRMATION_URL6 = 0x100B CONNECTION_TYPE = 0X100C CONNECTION_TYPE_FLAGS = 0X100D CREDENTIAL = 0X100E DEVICE_NAME = 0x1011 DEVICE_PASSWORD_ID = 0x1012 E_HASH1 = 0x1014 E_HASH2 = 0x1015 E_SNONCE1 = 0x1016 E_SNONCE2 = 0x1017 ENCRYPTED_SETTINGS = 0x1018 ENCRYPTION_TYPE = 0X100F ENCRYPTION_TYPE_FLAGS = 0x1010 ENROLLEE_NONCE = 0x101A FEATURE_ID = 0x101B IDENTITY = 0X101C INDENTITY_PROOF = 0X101D KEY_WRAP_AUTHENTICATOR = 0x101E KEY_IDENTIFIER = 0X101F MAC_ADDRESS = 0x1020 MANUFACTURER = 0x1021 MESSAGE_TYPE = 0x1022 MODEL_NAME = 0x1023 MODEL_NUMBER = 0x1024 NETWORK_INDEX = 0x1026 NETWORK_KEY = 0x1027 NETWORK_KEY_INDEX = 0x1028 NEW_DEVICE_NAME = 0x1029 NEW_PASSWORD = 0x102A OOB_DEVICE_PASSWORD = 0X102C OS_VERSION= 0X102D POWER_LEVEL = 0X102F PSK_CURRENT = 0x1030 PSK_MAX = 0x1031 PUBLIC_KEY = 0x1032 RADIO_ENABLED = 0x1033 REBOOT = 0x1034 REGISTRAR_CURRENT = 0x1035 REGISTRAR_ESTABLISHED = 0x1036 REGISTRAR_LIST = 0x1037 REGISTRAR_MAX = 0x1038 REGISTRAR_NONCE = 0x1039 REQUEST_TYPE = 0x103A RESPONSE_TYPE = 0x103B RF_BANDS = 0X103C R_HASH1 = 0X103D R_HASH2 = 0X103E R_SNONCE1 = 0X103F R_SNONCE2 = 0x1040 SELECTED_REGISTRAR = 0x1041 SERIAL_NUMBER = 0x1042 WPS_STATE = 0x1044 SSID = 0x1045 TOTAL_NETWORKS = 0x1046 UUID_E = 0x1047 UUID_R = 0x1048 VENDOR_EXTENSION = 0x1049 VERSION = 0x104A X_509_CERTIFICATE_REQUEST = 0x104B X_509_CERTIFICATE = 0x104C EAP_IDENTITY = 0x104D MESSAGE_COUNTER = 0x104E PUBLIC_KEY_HASH = 0x104F REKEY_KEY = 0x1050 KEY_LIFETIME = 0x1051 PERMITTED_CONFIG_METHODS = 0x1052 SELECTED_REGISTRAR_CONFIG_METHODS= 0x1053 PRIMARY_DEVICE_TYPE = 0x1054 SECONDARY_DEVICE_TYPE_LIST = 0x1055 PORTABLE_DEVICE = 0x1056 AP_SETUP_LOCKED = 0x1057 APPLICATION_EXTENSION = 0x1058 EAP_TYPE = 0x1059 INITIALIZATION_VECTOR = 0x1060 KEY_PROVIDED_AUTOMATICALLY = 0x1061 _802_1X_ENABLED = 0x1062 APP_SESSION_KEY = 0x1063 WEP_TRANSMIT_KEY = 0x1064 type MessageType struct { // object: """Message types according to WPS 1.0h spec, section 11""" BEACON = 0x01 PROBE_REQUEST = 0x02 PROBE_RESPONSE = 0x03 M1 = 0x04 M2 = 0x05 M2D = 0x06 M3 = 0x07 M4 = 0x08 M5 = 0x09 M6 = 0x0A M7 = 0x0B M8 = 0x0C WSC_ACK = 0x0D WSC_NACK = 0x0E WSC_DONE = 0x0F type AuthTypeFlag struct { // object: OPEN = 0x0001 WPAPSK = 0x0002 SHARED = 0x0004 WPA = 0x0008 WPA2 = 0x0010 WPA2PSK = 0x0020 AuthTypeFlag_ALL = AuthTypeFlag.OPEN | \ AuthTypeFlag.WPAPSK | \ AuthTypeFlag.SHARED | \ AuthTypeFlag.WPA | \ AuthTypeFlag.WPA2 | \ AuthTypeFlag.WPA2PSK type EncryptionTypeFlag struct { // object: NONE = 0x0001 WEP = 0x0002 TKIP = 0x0004 AES = 0x0008 EncryptionTypeFlag_ALL = EncryptionTypeFlag.NONE | EncryptionTypeFlag.WEP | EncryptionTypeFlag.TKIP | EncryptionTypeFlag.AES type ConnectionTypeFlag struct { // object: ESS = 0x01 IBSS = 0x02 type ConfigMethod struct { // object: USBA = 0x0001 ETHERNET = 0x0002 LABEL = 0x0004 DISPLAY = 0x0008 EXT_NFC_TOKEN = 0x0010 INT_NFC_TOKEN = 0x0020 NFC_INTERFACE = 0x0040 PUSHBUTTON = 0x0080 KEYPAD = 0x0100 type OpCode struct { // object: WSC_START = 0x01 WSC_ACK = 0x02 WSC_NACK = 0x03 WSC_MSG = 0x04 WSC_DONE = 0x05 WSC_FRAG_ACK = 0x06 type AssocState struct { // object: NOT_ASSOC = 0 CONN_SUCCESS = 1 CFG_FAILURE = 2 FAILURE = 3, IP_FAILURE = 4 type ConfigError struct { // object: NO_ERROR = 0 OOB_IFACE_READ_ERROR = 1 DECRYPTION_CRC_FAILURE = 2 _24_CHAN_NOT_SUPPORTED = 3 _50_CHAN_NOT_SUPPORTED = 4 SIGNAL_TOO_WEAK = 5 
NETWORK_AUTH_FAILURE = 6 NETWORK_ASSOC_FAILURE = 7 NO_DHCP_RESPONSE = 8 FAILED_DHCP_CONFIG = 9 IP_ADDR_CONFLICT = 10 NO_CONN_TO_REGISTRAR = 11 MULTIPLE_PBC_DETECTED = 12 ROGUE_SUSPECTED = 13 DEVICE_BUSY = 14 SETUP_LOCKED = 15 MSG_TIMEOUT = 16 REG_SESS_TIMEOUT = 17 DEV_PASSWORD_AUTH_FAILURE = 18 type DevicePasswordId struct { // object: DEFAULT = 0x0000 USER_SPECIFIED = 0x0001 MACHINE_SPECIFIED = 0x0002 REKEY = 0x0003 PUSHBUTTON = 0x0004 REGISTRAR_SPECIFIED = 0x0005 type WpsState struct { // object: NOT_CONFIGURED = 0x01 CONFIGURED = 0x02 type SimpleConfig struct { // ProtocolPacket: "For now, it supports Simple configs with the bits more_fragments and length_field not set" header_size = 2 tail_size = 0 op_code = Byte(0) flags = Byte(1) more_fragments = Bit(1, 0) length_field = Bit(1,1) BUILDERS = { SCElem.CONNECTION_TYPE: ByteBuilder(), SCElem.CONNECTION_TYPE_FLAGS: ByteBuilder(), SCElem.VERSION: ByteBuilder(), SCElem.MESSAGE_TYPE: ByteBuilder(), SCElem.NETWORK_INDEX: ByteBuilder(), SCElem.NETWORK_KEY_INDEX: ByteBuilder(), SCElem.POWER_LEVEL: ByteBuilder(), SCElem.PSK_CURRENT: ByteBuilder(), SCElem.PSK_MAX: ByteBuilder(), SCElem.REGISTRAR_CURRENT: ByteBuilder(), SCElem.REGISTRAR_MAX: ByteBuilder(), SCElem.REQUEST_TYPE: ByteBuilder(), SCElem.RESPONSE_TYPE: ByteBuilder(), SCElem.RF_BANDS: ByteBuilder(), SCElem.WPS_STATE: ByteBuilder(), SCElem.TOTAL_NETWORKS: ByteBuilder(), SCElem.VERSION: ByteBuilder(), SCElem.WEP_TRANSMIT_KEY: ByteBuilder(), SCElem.CONFIRMATION_URL4: StringBuilder(), SCElem.CONFIRMATION_URL6: StringBuilder(), SCElem.DEVICE_NAME: StringBuilder(), SCElem.IDENTITY: StringBuilder(), SCElem.MANUFACTURER: StringBuilder(), SCElem.MODEL_NAME: StringBuilder(), SCElem.MODEL_NUMBER: StringBuilder(), SCElem.NEW_DEVICE_NAME: StringBuilder(), SCElem.NEW_PASSWORD: StringBuilder(), SCElem.SERIAL_NUMBER: StringBuilder(), SCElem.EAP_IDENTITY: StringBuilder(), SCElem.NETWORK_KEY: StringBuilder(), SCElem.AP_CHANNEL: NumBuilder(2), SCElem.ASSOCIATION_STATE: NumBuilder(2), SCElem.AUTHENTICATION_TYPE: NumBuilder(2), SCElem.AUTHENTICATION_TYPE_FLAGS: NumBuilder(2), SCElem.CONFIG_METHODS: NumBuilder(2), SCElem.CONFIGURATION_ERROR: NumBuilder(2), SCElem.DEVICE_PASSWORD_ID: NumBuilder(2), SCElem.ENCRYPTION_TYPE: NumBuilder(2), SCElem.ENCRYPTION_TYPE_FLAGS: NumBuilder(2), SCElem.MESSAGE_COUNTER: NumBuilder(8), SCElem.KEY_LIFETIME: NumBuilder(4), SCElem.PERMITTED_CONFIG_METHODS: NumBuilder(2), SCElem.SELECTED_REGISTRAR_CONFIG_METHODS: NumBuilder(2), SCElem.PUBLIC_KEY: NumBuilder(192), } @classmethod func build_tlv_container(cls interface{}){ return TLVContainer( builders=SimpleConfig.BUILDERS, descs = dict( (v,k) for (k,v) in SCElem.__dict__.items() ) }
return ary func (self TYPE) to_ary(value interface{}){ return array.array("B", value)
random_line_split
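# The wps.go row above reads like a machine-mangled rendering of impacket's Python wps.py.
# Against that Python original (a Python 2-era API; the impacket.wps module path and the
# element values below are assumptions), a hedged sketch of how the TLV container is
# typically built and parsed:
from impacket.wps import SimpleConfig, SCElem, MessageType

tlvs = SimpleConfig.build_tlv_container()          # container wired with SimpleConfig.BUILDERS
tlvs.append(SCElem.VERSION, 0x10)                  # ByteBuilder: single byte
tlvs.append(SCElem.MESSAGE_TYPE, MessageType.M1)   # ByteBuilder
tlvs.append(SCElem.MANUFACTURER, "ExampleCorp")    # StringBuilder, hypothetical value

raw = tlvs.to_ary()  # each element serialized as type (2 bytes) | length (2 bytes) | value

# Parsing reverses the process: feed the byte array into a fresh container.
parsed = SimpleConfig.build_tlv_container().from_ary(raw)
assert parsed.first(SCElem.MESSAGE_TYPE) == MessageType.M1
assert SCElem.MANUFACTURER in parsed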
vmctx.rs
//! Interfaces for accessing instance data from hostcalls. //! //! This module contains both a Rust-friendly API ([`Vmctx`](struct.Vmctx.html)) as well as C-style //! exports for compatibility with hostcalls written against `lucet-runtime-c`. pub use crate::c_api::lucet_vmctx; use crate::alloc::instance_heap_offset; use crate::context::Context; use crate::error::Error; use crate::instance::{ Instance, InstanceInternal, State, TerminationDetails, CURRENT_INSTANCE, HOST_CTX, }; use lucet_module::{FunctionHandle, GlobalValue}; use std::any::Any; use std::borrow::{Borrow, BorrowMut}; use std::cell::{Ref, RefCell, RefMut}; /// An opaque handle to a running instance's context. #[derive(Debug)] pub struct Vmctx { vmctx: *mut lucet_vmctx, /// A view of the underlying instance's heap. /// /// This must never be dropped automatically, as the view does not own the heap. Rather, this is /// a value used to implement dynamic borrowing of the heap contents that are owned and managed /// by the instance and its `Alloc`. heap_view: RefCell<Box<[u8]>>, /// A view of the underlying instance's globals. /// /// This must never be dropped automatically, as the view does not own the globals. Rather, this /// is a value used to implement dynamic borrowing of the globals that are owned and managed by /// the instance and its `Alloc`. globals_view: RefCell<Box<[GlobalValue]>>, } impl Drop for Vmctx { fn drop(&mut self) { let heap_view = self.heap_view.replace(Box::new([])); let globals_view = self.globals_view.replace(Box::new([])); // as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap // and globals to be dropped Box::leak(heap_view); Box::leak(globals_view); } } pub trait VmctxInternal { /// Get a reference to the `Instance` for this guest. fn instance(&self) -> &Instance; /// Get a mutable reference to the `Instance` for this guest. /// /// ### Safety /// /// Using this method, you could hold on to multiple mutable references to the same /// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise /// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or /// terminating the instance. unsafe fn instance_mut(&self) -> &mut Instance; } impl VmctxInternal for Vmctx { fn instance(&self) -> &Instance { unsafe { instance_from_vmctx(self.vmctx) } } unsafe fn instance_mut(&self) -> &mut Instance { instance_from_vmctx(self.vmctx) } } impl Vmctx { /// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function. /// /// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut /// Vmctx` argument to a `lucet_hostcalls!`-wrapped function. pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx
/// Return the underlying `vmctx` pointer. pub fn as_raw(&self) -> *mut lucet_vmctx { self.vmctx } /// Return the WebAssembly heap as a slice of bytes. /// /// If the heap is already mutably borrowed by `heap_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn heap(&self) -> Ref<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly heap as a mutable slice of bytes. /// /// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn heap_mut(&self) -> RefMut<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Check whether the heap has grown, and replace the heap view if it has. /// /// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in /// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across /// it. /// /// TODO: There is still an unsound case, though, when a heap reference is held across a call /// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as /// well, causing any outstanding heap references to become invalid. We will address this when /// we rework the interface for calling back into the guest. unsafe fn reconstitute_heap_view_if_needed(&self) { let inst = self.instance_mut(); if inst.heap_mut().len() != self.heap_view.borrow().len() { let old_heap_view = self .heap_view .replace(Box::<[u8]>::from_raw(inst.heap_mut())); // as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap // to be dropped Box::leak(old_heap_view); } } /// Check whether a given range in the host address space overlaps with the memory that backs /// the instance heap. pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool { self.instance().check_heap(ptr, len) } /// Check whether a context value of a particular type exists. pub fn contains_embed_ctx<T: Any>(&self) -> bool { self.instance().contains_embed_ctx::<T>() } /// Get a reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> { match self.instance().embed_ctx.try_get::<T>() { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")), None => panic!(TerminationDetails::CtxNotFound), } } /// Get a mutable reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already borrowed by some other use of `get_embed_ctx` or /// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`. 
pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> { match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")), None => panic!(TerminationDetails::CtxNotFound), } } /// Terminate this guest and return to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate an instance from a hostcall, /// as any resources currently in scope will not be dropped. Instead, use /// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body. pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) -> ! { self.instance_mut().terminate(details) } /// Grow the guest memory by the given number of WebAssembly pages. /// /// On success, returns the number of pages that existed before the call. pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> { unsafe { self.instance_mut().grow_memory(additional_pages) } } /// Return the WebAssembly globals as a slice of `i64`s. /// /// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn globals(&self) -> Ref<'_, [GlobalValue]> { let r = self .globals_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly globals as a mutable slice of `i64`s. /// /// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn globals_mut(&self) -> RefMut<'_, [GlobalValue]> { let r = self .globals_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Get a function pointer by WebAssembly table and function index. /// /// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses /// table indices as its runtime representation of function pointers. /// /// We do not currently reflect function type information into the Rust type system, so callers /// of the returned function must take care to cast it to the correct type before calling. The /// correct type will include the `vmctx` argument, which the caller is responsible for passing /// from its own context. /// /// ```no_run /// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate}; /// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx}; /// /// lucet_hostcalls! { /// #[no_mangle] /// pub unsafe extern "C" fn hostcall_call_binop( /// &mut vmctx, /// binop_table_idx: u32, /// binop_func_idx: u32, /// operand1: u32, /// operand2: u32, /// ) -> u32 { /// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) { /// let typed_binop = std::mem::transmute::< /// usize, /// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32 /// >(binop.ptr.as_usize()); /// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) } /// } else { /// lucet_hostcall_terminate!("invalid function index") /// } /// } /// } pub fn get_func_from_idx( &self, table_idx: u32, func_idx: u32, ) -> Result<FunctionHandle, Error> { self.instance() .module() .get_func_from_idx(table_idx, func_idx) } } /// Get an `Instance` from the `vmctx` pointer. /// /// Only safe to call from within the guest context. 
pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance { assert!(!vmctx.is_null(), "vmctx is not null"); let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance; // We shouldn't actually need to access the thread local, only the exception handler should // need to. But, as long as the thread local exists, we should make sure that the guest // hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest // cant pull any shenanigans but there have been bugs before.) CURRENT_INSTANCE.with(|current_instance| { if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) { assert_eq!( inst_ptr, current_inst_ptr, "vmctx corresponds to current instance" ); } else { panic!( "current instance is not set; thread local storage failure can indicate \ dynamic linking issues" ); } }); let inst = inst_ptr.as_mut().unwrap(); assert!(inst.valid_magic()); inst } impl Instance { /// Terminate the guest and swap back to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate from a hostcall; use panics /// with `TerminationDetails` instead. unsafe fn terminate(&mut self, details: TerminationDetails) -> ! { self.state = State::Terminated { details }; #[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) }) } }
{ let inst = instance_from_vmctx(vmctx); assert!(inst.valid_magic()); let res = Vmctx { vmctx, heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())), globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())), }; res }
identifier_body
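The row above masks the body of `Vmctx::from_raw` and carries the doc comments for `heap()` and `heap_mut()`, which dynamically borrow the instance heap and terminate the guest with `TerminationDetails::BorrowError` on overlapping borrows. As a rough illustration of that borrowing discipline, here is a minimal hostcall sketch written in the style of the `lucet_hostcalls!` doc example embedded in the file itself; the hostcall name `hostcall_peek_byte` and its behavior are assumptions for illustration, not part of the original source.

```rust
use lucet_runtime_internals::lucet_hostcalls;
use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx};

lucet_hostcalls! {
    #[no_mangle]
    pub unsafe extern "C" fn hostcall_peek_byte(
        &mut vmctx,
        offset: u32,
    ) -> u32 {
        // `heap()` returns a `Ref<'_, [u8]>` view of the guest heap; taking
        // `heap_mut()` while this borrow is live would terminate the instance
        // with `TerminationDetails::BorrowError`.
        let heap = vmctx.heap();
        // Out-of-range reads fall back to 0 in this sketch.
        heap.get(offset as usize).copied().unwrap_or(0) as u32
    }
}
```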
vmctx.rs
//! Interfaces for accessing instance data from hostcalls. //! //! This module contains both a Rust-friendly API ([`Vmctx`](struct.Vmctx.html)) as well as C-style //! exports for compatibility with hostcalls written against `lucet-runtime-c`. pub use crate::c_api::lucet_vmctx; use crate::alloc::instance_heap_offset; use crate::context::Context; use crate::error::Error; use crate::instance::{ Instance, InstanceInternal, State, TerminationDetails, CURRENT_INSTANCE, HOST_CTX, }; use lucet_module::{FunctionHandle, GlobalValue}; use std::any::Any; use std::borrow::{Borrow, BorrowMut}; use std::cell::{Ref, RefCell, RefMut}; /// An opaque handle to a running instance's context. #[derive(Debug)] pub struct Vmctx { vmctx: *mut lucet_vmctx, /// A view of the underlying instance's heap. /// /// This must never be dropped automatically, as the view does not own the heap. Rather, this is /// a value used to implement dynamic borrowing of the heap contents that are owned and managed /// by the instance and its `Alloc`. heap_view: RefCell<Box<[u8]>>, /// A view of the underlying instance's globals. /// /// This must never be dropped automatically, as the view does not own the globals. Rather, this /// is a value used to implement dynamic borrowing of the globals that are owned and managed by /// the instance and its `Alloc`. globals_view: RefCell<Box<[GlobalValue]>>, } impl Drop for Vmctx { fn drop(&mut self) { let heap_view = self.heap_view.replace(Box::new([])); let globals_view = self.globals_view.replace(Box::new([])); // as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap // and globals to be dropped Box::leak(heap_view); Box::leak(globals_view); } } pub trait VmctxInternal { /// Get a reference to the `Instance` for this guest. fn instance(&self) -> &Instance; /// Get a mutable reference to the `Instance` for this guest. /// /// ### Safety /// /// Using this method, you could hold on to multiple mutable references to the same /// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise /// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or /// terminating the instance. unsafe fn instance_mut(&self) -> &mut Instance; } impl VmctxInternal for Vmctx { fn instance(&self) -> &Instance { unsafe { instance_from_vmctx(self.vmctx) } } unsafe fn instance_mut(&self) -> &mut Instance { instance_from_vmctx(self.vmctx) } } impl Vmctx { /// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function. /// /// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut /// Vmctx` argument to a `lucet_hostcalls!`-wrapped function. pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx { let inst = instance_from_vmctx(vmctx); assert!(inst.valid_magic()); let res = Vmctx { vmctx, heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())), globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())), }; res } /// Return the underlying `vmctx` pointer. pub fn as_raw(&self) -> *mut lucet_vmctx { self.vmctx } /// Return the WebAssembly heap as a slice of bytes. /// /// If the heap is already mutably borrowed by `heap_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. 
pub fn heap(&self) -> Ref<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly heap as a mutable slice of bytes. /// /// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn heap_mut(&self) -> RefMut<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Check whether the heap has grown, and replace the heap view if it has. /// /// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in /// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across /// it. /// /// TODO: There is still an unsound case, though, when a heap reference is held across a call /// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as /// well, causing any outstanding heap references to become invalid. We will address this when /// we rework the interface for calling back into the guest. unsafe fn reconstitute_heap_view_if_needed(&self) { let inst = self.instance_mut(); if inst.heap_mut().len() != self.heap_view.borrow().len() { let old_heap_view = self .heap_view .replace(Box::<[u8]>::from_raw(inst.heap_mut())); // as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap // to be dropped Box::leak(old_heap_view); } } /// Check whether a given range in the host address space overlaps with the memory that backs /// the instance heap. pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool { self.instance().check_heap(ptr, len) } /// Check whether a context value of a particular type exists. pub fn contains_embed_ctx<T: Any>(&self) -> bool { self.instance().contains_embed_ctx::<T>() } /// Get a reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> { match self.instance().embed_ctx.try_get::<T>() { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")), None => panic!(TerminationDetails::CtxNotFound), } } /// Get a mutable reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already borrowed by some other use of `get_embed_ctx` or /// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> { match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")), None => panic!(TerminationDetails::CtxNotFound), } } /// Terminate this guest and return to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate an instance from a hostcall, /// as any resources currently in scope will not be dropped. 
Instead, use /// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body. pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) -> ! { self.instance_mut().terminate(details) } /// Grow the guest memory by the given number of WebAssembly pages. /// /// On success, returns the number of pages that existed before the call. pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> { unsafe { self.instance_mut().grow_memory(additional_pages) } } /// Return the WebAssembly globals as a slice of `i64`s. /// /// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn globals(&self) -> Ref<'_, [GlobalValue]> { let r = self .globals_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly globals as a mutable slice of `i64`s. /// /// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn globals_mut(&self) -> RefMut<'_, [GlobalValue]> { let r = self .globals_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Get a function pointer by WebAssembly table and function index. /// /// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses /// table indices as its runtime representation of function pointers. /// /// We do not currently reflect function type information into the Rust type system, so callers /// of the returned function must take care to cast it to the correct type before calling. The /// correct type will include the `vmctx` argument, which the caller is responsible for passing /// from its own context. /// /// ```no_run /// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate}; /// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx}; /// /// lucet_hostcalls! { /// #[no_mangle] /// pub unsafe extern "C" fn hostcall_call_binop( /// &mut vmctx, /// binop_table_idx: u32, /// binop_func_idx: u32, /// operand1: u32, /// operand2: u32, /// ) -> u32 { /// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) { /// let typed_binop = std::mem::transmute::< /// usize, /// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32 /// >(binop.ptr.as_usize()); /// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) } /// } else { /// lucet_hostcall_terminate!("invalid function index") /// } /// } /// } pub fn get_func_from_idx( &self, table_idx: u32, func_idx: u32, ) -> Result<FunctionHandle, Error> { self.instance() .module() .get_func_from_idx(table_idx, func_idx) } } /// Get an `Instance` from the `vmctx` pointer. /// /// Only safe to call from within the guest context. pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance { assert!(!vmctx.is_null(), "vmctx is not null"); let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance; // We shouldn't actually need to access the thread local, only the exception handler should // need to. But, as long as the thread local exists, we should make sure that the guest // hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest // cant pull any shenanigans but there have been bugs before.) 
CURRENT_INSTANCE.with(|current_instance| { if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) { assert_eq!( inst_ptr, current_inst_ptr, "vmctx corresponds to current instance" ); } else { panic!( "current instance is not set; thread local storage failure can indicate \ dynamic linking issues" ); } }); let inst = inst_ptr.as_mut().unwrap(); assert!(inst.valid_magic()); inst } impl Instance { /// Terminate the guest and swap back to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate from a hostcall; use panics /// with `TerminationDetails` instead. unsafe fn terminate(&mut self, details: TerminationDetails) -> ! { self.state = State::Terminated { details }; #[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused
}
HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) }) }
random_line_split
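This split lands inside the doc comments for `terminate_no_unwind` and `grow_memory`. Because `grow_memory()` takes `&mut self`, no heap view can be held across it, and `reconstitute_heap_view_if_needed()` swaps in a fresh view on the next borrow. A minimal sketch of that sequence, again using the `lucet_hostcalls!` style shown in the file (the hostcall name and the error sentinel are assumptions):

```rust
use lucet_runtime_internals::lucet_hostcalls;
use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx};

lucet_hostcalls! {
    #[no_mangle]
    pub unsafe extern "C" fn hostcall_grow_then_len(
        &mut vmctx,
        additional_pages: u32,
    ) -> u32 {
        // Any `Ref` obtained from `vmctx.heap()` must be dropped before this
        // point, because `grow_memory` borrows the Vmctx mutably.
        let grown = vmctx.grow_memory(additional_pages);
        match grown {
            // A fresh borrow after growth sees the reconstituted, larger heap view.
            Ok(_old_page_count) => vmctx.heap().len() as u32,
            // 0 is used as an ad-hoc error sentinel for this sketch only.
            Err(_) => 0,
        }
    }
}
```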
vmctx.rs
//! Interfaces for accessing instance data from hostcalls. //! //! This module contains both a Rust-friendly API ([`Vmctx`](struct.Vmctx.html)) as well as C-style //! exports for compatibility with hostcalls written against `lucet-runtime-c`. pub use crate::c_api::lucet_vmctx; use crate::alloc::instance_heap_offset; use crate::context::Context; use crate::error::Error; use crate::instance::{ Instance, InstanceInternal, State, TerminationDetails, CURRENT_INSTANCE, HOST_CTX, }; use lucet_module::{FunctionHandle, GlobalValue}; use std::any::Any; use std::borrow::{Borrow, BorrowMut}; use std::cell::{Ref, RefCell, RefMut}; /// An opaque handle to a running instance's context. #[derive(Debug)] pub struct Vmctx { vmctx: *mut lucet_vmctx, /// A view of the underlying instance's heap. /// /// This must never be dropped automatically, as the view does not own the heap. Rather, this is /// a value used to implement dynamic borrowing of the heap contents that are owned and managed /// by the instance and its `Alloc`. heap_view: RefCell<Box<[u8]>>, /// A view of the underlying instance's globals. /// /// This must never be dropped automatically, as the view does not own the globals. Rather, this /// is a value used to implement dynamic borrowing of the globals that are owned and managed by /// the instance and its `Alloc`. globals_view: RefCell<Box<[GlobalValue]>>, } impl Drop for Vmctx { fn drop(&mut self) { let heap_view = self.heap_view.replace(Box::new([])); let globals_view = self.globals_view.replace(Box::new([])); // as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap // and globals to be dropped Box::leak(heap_view); Box::leak(globals_view); } } pub trait VmctxInternal { /// Get a reference to the `Instance` for this guest. fn instance(&self) -> &Instance; /// Get a mutable reference to the `Instance` for this guest. /// /// ### Safety /// /// Using this method, you could hold on to multiple mutable references to the same /// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise /// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or /// terminating the instance. unsafe fn instance_mut(&self) -> &mut Instance; } impl VmctxInternal for Vmctx { fn instance(&self) -> &Instance { unsafe { instance_from_vmctx(self.vmctx) } } unsafe fn instance_mut(&self) -> &mut Instance { instance_from_vmctx(self.vmctx) } } impl Vmctx { /// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function. /// /// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut /// Vmctx` argument to a `lucet_hostcalls!`-wrapped function. pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx { let inst = instance_from_vmctx(vmctx); assert!(inst.valid_magic()); let res = Vmctx { vmctx, heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())), globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())), }; res } /// Return the underlying `vmctx` pointer. pub fn as_raw(&self) -> *mut lucet_vmctx { self.vmctx } /// Return the WebAssembly heap as a slice of bytes. /// /// If the heap is already mutably borrowed by `heap_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. 
pub fn heap(&self) -> Ref<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly heap as a mutable slice of bytes. /// /// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn heap_mut(&self) -> RefMut<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Check whether the heap has grown, and replace the heap view if it has. /// /// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in /// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across /// it. /// /// TODO: There is still an unsound case, though, when a heap reference is held across a call /// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as /// well, causing any outstanding heap references to become invalid. We will address this when /// we rework the interface for calling back into the guest. unsafe fn reconstitute_heap_view_if_needed(&self) { let inst = self.instance_mut(); if inst.heap_mut().len() != self.heap_view.borrow().len() { let old_heap_view = self .heap_view .replace(Box::<[u8]>::from_raw(inst.heap_mut())); // as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap // to be dropped Box::leak(old_heap_view); } } /// Check whether a given range in the host address space overlaps with the memory that backs /// the instance heap. pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool { self.instance().check_heap(ptr, len) } /// Check whether a context value of a particular type exists. pub fn contains_embed_ctx<T: Any>(&self) -> bool { self.instance().contains_embed_ctx::<T>() } /// Get a reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> { match self.instance().embed_ctx.try_get::<T>() { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")), None => panic!(TerminationDetails::CtxNotFound), } } /// Get a mutable reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already borrowed by some other use of `get_embed_ctx` or /// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> { match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")), None => panic!(TerminationDetails::CtxNotFound), } } /// Terminate this guest and return to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate an instance from a hostcall, /// as any resources currently in scope will not be dropped. 
Instead, use /// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body. pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) -> ! { self.instance_mut().terminate(details) } /// Grow the guest memory by the given number of WebAssembly pages. /// /// On success, returns the number of pages that existed before the call. pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> { unsafe { self.instance_mut().grow_memory(additional_pages) } } /// Return the WebAssembly globals as a slice of `i64`s. /// /// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn globals(&self) -> Ref<'_, [GlobalValue]> { let r = self .globals_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly globals as a mutable slice of `i64`s. /// /// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn
(&self) -> RefMut<'_, [GlobalValue]> { let r = self .globals_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Get a function pointer by WebAssembly table and function index. /// /// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses /// table indices as its runtime representation of function pointers. /// /// We do not currently reflect function type information into the Rust type system, so callers /// of the returned function must take care to cast it to the correct type before calling. The /// correct type will include the `vmctx` argument, which the caller is responsible for passing /// from its own context. /// /// ```no_run /// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate}; /// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx}; /// /// lucet_hostcalls! { /// #[no_mangle] /// pub unsafe extern "C" fn hostcall_call_binop( /// &mut vmctx, /// binop_table_idx: u32, /// binop_func_idx: u32, /// operand1: u32, /// operand2: u32, /// ) -> u32 { /// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) { /// let typed_binop = std::mem::transmute::< /// usize, /// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32 /// >(binop.ptr.as_usize()); /// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) } /// } else { /// lucet_hostcall_terminate!("invalid function index") /// } /// } /// } pub fn get_func_from_idx( &self, table_idx: u32, func_idx: u32, ) -> Result<FunctionHandle, Error> { self.instance() .module() .get_func_from_idx(table_idx, func_idx) } } /// Get an `Instance` from the `vmctx` pointer. /// /// Only safe to call from within the guest context. pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance { assert!(!vmctx.is_null(), "vmctx is not null"); let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance; // We shouldn't actually need to access the thread local, only the exception handler should // need to. But, as long as the thread local exists, we should make sure that the guest // hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest // cant pull any shenanigans but there have been bugs before.) CURRENT_INSTANCE.with(|current_instance| { if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) { assert_eq!( inst_ptr, current_inst_ptr, "vmctx corresponds to current instance" ); } else { panic!( "current instance is not set; thread local storage failure can indicate \ dynamic linking issues" ); } }); let inst = inst_ptr.as_mut().unwrap(); assert!(inst.valid_magic()); inst } impl Instance { /// Terminate the guest and swap back to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate from a hostcall; use panics /// with `TerminationDetails` instead. unsafe fn terminate(&mut self, details: TerminationDetails) -> ! { self.state = State::Terminated { details }; #[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) }) } }
globals_mut
identifier_name
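The masked identifier in this row is `globals_mut`, and the surrounding suffix also documents `get_embed_ctx` / `get_embed_ctx_mut`, which borrow a typed value from the embedder context and terminate with `TerminationDetails::CtxNotFound` or `TerminationDetails::BorrowError` on failure. A small hostcall sketch of that pattern follows; the `HostCounter` type and the hostcall name are hypothetical, and how the embedder registers the context value is outside the scope of this file:

```rust
use lucet_runtime_internals::lucet_hostcalls;
use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx};

// Hypothetical embedder-provided context value; any `T: Any` works with
// `get_embed_ctx::<T>()` / `get_embed_ctx_mut::<T>()`.
struct HostCounter {
    hits: u64,
}

lucet_hostcalls! {
    #[no_mangle]
    pub unsafe extern "C" fn hostcall_count_hits(
        &mut vmctx,
        delta: u64,
    ) -> u64 {
        // Terminates with `CtxNotFound` if the embedder never inserted a
        // `HostCounter`, and with `BorrowError` if it is already mutably
        // borrowed elsewhere during this hostcall.
        let mut counter = vmctx.get_embed_ctx_mut::<HostCounter>();
        counter.hits += delta;
        counter.hits
    }
}
```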
tombfix.js
/* jshint camelcase:false, nomen:false, latedef:false, forin:false */ /* jshint maxparams:4 */ /* global Components */ (function executeTombfixService(global) { 'use strict'; const EXTENSION_ID = '[email protected]', {interfaces: Ci, classes: Cc, results: Cr, utils: Cu} = Components, // http://mxr.mozilla.org/mozilla-central/source/toolkit/modules/Services.jsm {Services} = Cu.import('resource://gre/modules/Services.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/js/xpconnect/loader/XPCOMUtils.jsm {XPCOMUtils} = Cu.import('resource://gre/modules/XPCOMUtils.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/Console.jsm /* jshint ignore:start */ {console} = Cu.import( 'resource://gre/modules/devtools/Console.jsm', {} ), /* jshint ignore:end */ { appShell: AppShellService, scriptloader: ScriptLoader, wm: WindowMediator } = Services, FileProtocolHandler = getService( 'network/protocol;1?name=file', Ci.nsIFileProtocolHandler ), {nsILocalFile: ILocalFile} = Ci; const SCRIPT_FILES = [ // library/third_party 'MochiKit.js', 'twitter-text.js', // library 'component.js', 'expand.js', 'utility.js', 'tabWatcher.js', 'repository.js', 'models.js', 'Tombfix.Service.js', 'actions.js', 'extractors.js', 'ui.js' ]; // https://developer.mozilla.org/en-US/docs/Components.utils.importGlobalProperties Cu.importGlobalProperties(['File']); var getContentDir, Module, ModuleImpl; // ----[Application]-------------------------------------------- function getScriptFiles(dir) { var scripts = []; simpleIterator(dir.directoryEntries, ILocalFile, file => { if (/\.js$/.test(file.leafName)) { scripts.push(file); } }); return scripts; } function getLibraries() { var libDir, thirdPartyDir, scripts; libDir = getContentDir(); libDir.append('library'); thirdPartyDir = getContentDir(); thirdPartyDir.setRelativeDescriptor(thirdPartyDir, 'library'); thirdPartyDir.append('third_party'); scripts = getScriptFiles(thirdPartyDir).concat(getScriptFiles(libDir)); return SCRIPT_FILES.map(scriptName => { return scripts.find(file => file.leafName === scriptName); }); } function setupEnvironment(env) { var win = AppShellService.hiddenDOMWindow; // 変数/定数はhiddenDOMWindowのものを直接使う [ 'navigator', 'document', 'window', 'screen', 'XMLHttpRequest', 'XPathResult', 'Node', 'Element', 'KeyEvent', 'Event', 'DOMParser', 'XSLTProcessor', 'XMLSerializer', 'URL' ].forEach(propName => { env[propName] = win[propName]; }); // メソッドはthisが変わるとエラーになることがあるためbindして使う [ 'setTimeout', 'setInterval', 'clearTimeout', 'clearInterval', 'open', 'openDialog', 'atob', 'btoa' ].forEach(propName => { env[propName] = win[propName].bind(win); }); // モーダルにするためhiddenDOMWindowdではなく最新のウィンドウのメソッドを使う [ 'alert', 'confirm', 'prompt' ].forEach(propName => { env[propName] = forwardToWindow.bind(null, propName); }); } function forwardToWindow(propName, ...args) { var win = WindowMediator.getMostRecentWindow('navigator:browser'); return win[propName].apply(win, args); } // ----[Utility]-------------------------------------------- /* jshint ignore:start */
function getService(clsName, ifc) { try { let cls = Cc['@mozilla.org/' + clsName]; return cls ? (ifc ? cls.getService(ifc) : cls.getService()) : null; } catch (err) { return null; } } function loadAllSubScripts() { /* jshint validthis:true */ // libraryの読み込み loadSubScripts(getLibraries(), this); if (!this.getPref('disableAllScripts')) { // パッチの読み込み loadSubScripts(getScriptFiles(this.getPatchDir()), this); } } function loadSubScripts(files, global = function () {}) { var now = Date.now(); for (let file of files) { // クエリを付加しキャッシュを避ける ScriptLoader.loadSubScript( FileProtocolHandler.getURLSpecFromFile(file) + '?time=' + now, global, 'UTF-8' ); } } function simpleIterator(directoryEntries, ifc, func) { if (typeof ifc === 'string') { ifc = Ci[ifc]; } try { while (directoryEntries.hasMoreElements()) { let value = directoryEntries.getNext(); func(ifc ? value.QueryInterface(ifc) : value); } } catch (err) {} } function copy(target, obj, re) { for (let propName in obj) { if (!re || re.test(propName)) { target[propName] = obj[propName]; } } return target; } function exposeProperties(obj, recursive) { if (obj == null) { return; } Object.defineProperty(obj, '__exposedProps__', { value : {}, enumerable : false, writable : true, configurable : true }); for (let propName in obj) { obj.__exposedProps__[propName] = 'r'; if (recursive && typeof obj[propName] === 'object') { exposeProperties(obj[propName], true); } } } getContentDir = (function executeFunc() { var {AddonManager} = Cu.import( 'resource://gre/modules/AddonManager.jsm', {} ), dir = null, thread; AddonManager.getAddonByID(EXTENSION_ID, addon => { var target = addon.getResourceURI('/').QueryInterface(Ci.nsIFileURL) .file.QueryInterface(ILocalFile); target.setRelativeDescriptor(target, 'chrome/content'); dir = target; }); // using id:piro (http://piro.sakura.ne.jp/) method thread = getService('thread-manager;1').mainThread; while (dir === null) { thread.processNextEvent(true); } return function getContentDir() { return dir.clone(); }; }()); Module = { CID : Components.ID('{ab5cbd9b-56e1-42e4-8414-2201edb883e7}'), NAME : 'TombfixService', PID : '@tombfix.github.io/tombfix-service;1', initialized : false, onRegister : function onRegister() { XPCOMUtils.categoryManager.addCategoryEntry( 'content-policy', this.NAME, this.PID, true, true ); }, instance : { // http://mxr.mozilla.org/mozilla-central/source/content/base/public/nsIContentPolicy.idl shouldLoad : function shouldLoad() { return Ci.nsIContentPolicy.ACCEPT; }, shouldProcess : function shouldProcess() { return Ci.nsIContentPolicy.ACCEPT; }, QueryInterface : function queryInterface(iid) { if ( iid.equals(Ci.nsIContentPolicy) || iid.equals(Ci.nsISupports) || iid.equals(Ci.nsISupportsWeakReference) ) { return this; } throw Cr.NS_NOINTERFACE; } }, createInstance : function initialize(outer, iid) { var env, GM_Tombloo, GM_Tombfix; // nsIContentPolicyはhiddenDOMWindowの準備ができる前に取得される // 仮に応答できるオブジェクトを返し環境を構築できるまでの代替とする if (iid.equals(Ci.nsIContentPolicy)) { return this.instance; } // ブラウザが開かれるタイミングでインスタンスの要求を受け環境を初期化する // 2個目以降のウィンドウからは生成済みの環境を返す if (this.initialized) { return this.instance; } // 以降のコードはアプリケーション起動後に一度だけ通過する env = this.instance; // アプリケーション全体で、同じloadSubScripts関数を使いまわし汚染を防ぐ env.loadSubScripts = loadSubScripts; env.loadAllSubScripts = loadAllSubScripts; env.getContentDir = getContentDir; env.getLibraries = getLibraries; env.PID = this.PID; env.CID = this.CID; env.NAME = this.NAME; // ここでwindowやdocumentなどをenvに持ってくる setupEnvironment(env); // MochiKit内部で使用しているinstanceofで異常が発生するのを避ける 
env.MochiKit = {}; // for twttr env.twttr = env.window.twttr = {}; // libraryとパッチを読み込む env.loadAllSubScripts(); /* ここから他拡張用の処理 */ GM_Tombloo = copy({ Tombloo : { Service : copy( {}, env.Tombloo.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); GM_Tombfix = copy({ Tombfix : { Service : copy( {}, env.Tombfix.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); for (let modelName in env.Models) { if (env.Models.hasOwnProperty(modelName)) { GM_Tombfix[modelName] = GM_Tombloo[modelName] = copy( {}, env.Models[modelName], /^(?!.*(password|cookie))/i ); } } // 他拡張からの読み取りを許可する(Firefox 17用) exposeProperties(GM_Tombloo, true); exposeProperties(GM_Tombfix, true); // Scriptishサンドボックスの拡張 try { let scope = Cu.import('resource://scriptish/api.js', {}); scope.GM_API.prototype.GM_Tombloo = GM_Tombloo; scope.GM_API.prototype.GM_Tombfix = GM_Tombfix; } catch (err) { /* インストールされていない場合や無効になっている場合にエラーになる */ } /* 他拡張用の処理ここまで */ // 以降は初期化の最終処理 env.signal(env, 'environment-load'); this.initialized = true; return env; } }; // http://mxr.mozilla.org/mozilla-central/source/xpcom/components/nsIModule.idl ModuleImpl = { registerSelf : function registerSelf(compMgr, fileSpec, location, type) { compMgr.QueryInterface(Ci.nsIComponentRegistrar) .registerFactoryLocation( Module.CID, Module.NAME, Module.PID, fileSpec, location, type ); if (Module.onRegister) { Module.onRegister(compMgr, fileSpec, location, type); } }, canUnload : function canUnload() { return true; }, getClassObject : function getClassObject(compMgr, cid, iid) { if (!cid.equals(Module.CID)) { throw Cr.NS_ERROR_NO_INTERFACE; } if (!iid.equals(Ci.nsIFactory)) { throw Cr.NS_ERROR_NOT_IMPLEMENTED; } if (Module.onInit) { Module.onInit(compMgr, cid, iid); } return this.factory; }, factory : { createInstance: function createInstance(outer, iid) { var obj; if (outer != null) { throw Cr.NS_ERROR_NO_AGGREGATION; } obj = Module.createInstance(outer, iid); obj.Module = Module; obj.wrappedJSObject = obj; return obj; } } }; // https://developer.mozilla.org/en-US/docs/Mozilla/XPCOM/XPCOM_changes_in_Gecko_2.0#JavaScript_components global.NSGetFactory = function NSGetFactory() { return ModuleImpl.factory; }; }(this));
function log(msg) { console[typeof msg === 'object' ? 'dir' : 'log'](msg); } /* jshint ignore:end */
random_line_split
tombfix.js
/* jshint camelcase:false, nomen:false, latedef:false, forin:false */ /* jshint maxparams:4 */ /* global Components */ (function executeTombfixService(global) { 'use strict'; const EXTENSION_ID = '[email protected]', {interfaces: Ci, classes: Cc, results: Cr, utils: Cu} = Components, // http://mxr.mozilla.org/mozilla-central/source/toolkit/modules/Services.jsm {Services} = Cu.import('resource://gre/modules/Services.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/js/xpconnect/loader/XPCOMUtils.jsm {XPCOMUtils} = Cu.import('resource://gre/modules/XPCOMUtils.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/Console.jsm /* jshint ignore:start */ {console} = Cu.import( 'resource://gre/modules/devtools/Console.jsm', {} ), /* jshint ignore:end */ { appShell: AppShellService, scriptloader: ScriptLoader, wm: WindowMediator } = Services, FileProtocolHandler = getService( 'network/protocol;1?name=file', Ci.nsIFileProtocolHandler ), {nsILocalFile: ILocalFile} = Ci; const SCRIPT_FILES = [ // library/third_party 'MochiKit.js', 'twitter-text.js', // library 'component.js', 'expand.js', 'utility.js', 'tabWatcher.js', 'repository.js', 'models.js', 'Tombfix.Service.js', 'actions.js', 'extractors.js', 'ui.js' ]; // https://developer.mozilla.org/en-US/docs/Components.utils.importGlobalProperties Cu.importGlobalProperties(['File']); var getContentDir, Module, ModuleImpl; // ----[Application]-------------------------------------------- function getScriptFiles(dir) { var scripts = []; simpleIterator(dir.directoryEntries, ILocalFile, file => { if (/\.js$/.test(file.leafName)) { scripts.push(file); } }); return scripts; } function getLibraries() { var libDir, thirdPartyDir, scripts; libDir = getContentDir(); libDir.append('library'); thirdPartyDir = getContentDir(); thirdPartyDir.setRelativeDescriptor(thirdPartyDir, 'library'); thirdPartyDir.append('third_party'); scripts = getScriptFiles(thirdPartyDir).concat(getScriptFiles(libDir)); return SCRIPT_FILES.map(scriptName => { return scripts.find(file => file.leafName === scriptName); }); } function setupEnvironment(env) { var win = AppShellService.hiddenDOMWindow; // 変数/定数はhiddenDOMWindowのものを直接使う [ 'navigator', 'document', 'window', 'screen', 'XMLHttpRequest', 'XPathResult', 'Node', 'Element', 'KeyEvent', 'Event', 'DOMParser', 'XSLTProcessor', 'XMLSerializer', 'URL' ].forEach(propName => { env[propName] = win[propName]; }); // メソッドはthisが変わるとエラーになることがあるためbindして使う [ 'setTimeout', 'setInterval', 'clearTimeout', 'clearInterval', 'open', 'openDialog', 'atob', 'btoa' ].forEach(propName => { env[propName] = win[propName].bind(win); }); // モーダルにするためhiddenDOMWindowdではなく最新のウィンドウのメソッドを使う [ 'alert', 'confirm', 'prompt' ].forEach(propName => { env[propName] = forwardToWindow.bind(null, propName); }); } function forwardToWindow(propName, ...args) { var win = WindowMediator.getMostRecentWindow('navigator:browser'); return win[propName].apply(win, args); } // ----[Utility]-------------------------------------------- /* jshint ignore:start */ function log(msg) { console[typeof msg === 'object' ? 'dir' : 'log'](msg); } /* jshint ignore:end */ function getService(clsName, ifc) { try { let cls = Cc['@mozilla.org/' + clsName]; return cls ? (ifc ? cls.getService(ifc) : cls.getService()) : null; } catch (err) { return null; } } function loadAllSubScripts() { /* jshint validthis:true */ // libraryの読み込み loadSubScripts(getLibraries(), this); if (!this.getPref
ts')) { // パッチの読み込み loadSubScripts(getScriptFiles(this.getPatchDir()), this); } } function loadSubScripts(files, global = function () {}) { var now = Date.now(); for (let file of files) { // クエリを付加しキャッシュを避ける ScriptLoader.loadSubScript( FileProtocolHandler.getURLSpecFromFile(file) + '?time=' + now, global, 'UTF-8' ); } } function simpleIterator(directoryEntries, ifc, func) { if (typeof ifc === 'string') { ifc = Ci[ifc]; } try { while (directoryEntries.hasMoreElements()) { let value = directoryEntries.getNext(); func(ifc ? value.QueryInterface(ifc) : value); } } catch (err) {} } function copy(target, obj, re) { for (let propName in obj) { if (!re || re.test(propName)) { target[propName] = obj[propName]; } } return target; } function exposeProperties(obj, recursive) { if (obj == null) { return; } Object.defineProperty(obj, '__exposedProps__', { value : {}, enumerable : false, writable : true, configurable : true }); for (let propName in obj) { obj.__exposedProps__[propName] = 'r'; if (recursive && typeof obj[propName] === 'object') { exposeProperties(obj[propName], true); } } } getContentDir = (function executeFunc() { var {AddonManager} = Cu.import( 'resource://gre/modules/AddonManager.jsm', {} ), dir = null, thread; AddonManager.getAddonByID(EXTENSION_ID, addon => { var target = addon.getResourceURI('/').QueryInterface(Ci.nsIFileURL) .file.QueryInterface(ILocalFile); target.setRelativeDescriptor(target, 'chrome/content'); dir = target; }); // using id:piro (http://piro.sakura.ne.jp/) method thread = getService('thread-manager;1').mainThread; while (dir === null) { thread.processNextEvent(true); } return function getContentDir() { return dir.clone(); }; }()); Module = { CID : Components.ID('{ab5cbd9b-56e1-42e4-8414-2201edb883e7}'), NAME : 'TombfixService', PID : '@tombfix.github.io/tombfix-service;1', initialized : false, onRegister : function onRegister() { XPCOMUtils.categoryManager.addCategoryEntry( 'content-policy', this.NAME, this.PID, true, true ); }, instance : { // http://mxr.mozilla.org/mozilla-central/source/content/base/public/nsIContentPolicy.idl shouldLoad : function shouldLoad() { return Ci.nsIContentPolicy.ACCEPT; }, shouldProcess : function shouldProcess() { return Ci.nsIContentPolicy.ACCEPT; }, QueryInterface : function queryInterface(iid) { if ( iid.equals(Ci.nsIContentPolicy) || iid.equals(Ci.nsISupports) || iid.equals(Ci.nsISupportsWeakReference) ) { return this; } throw Cr.NS_NOINTERFACE; } }, createInstance : function initialize(outer, iid) { var env, GM_Tombloo, GM_Tombfix; // nsIContentPolicyはhiddenDOMWindowの準備ができる前に取得される // 仮に応答できるオブジェクトを返し環境を構築できるまでの代替とする if (iid.equals(Ci.nsIContentPolicy)) { return this.instance; } // ブラウザが開かれるタイミングでインスタンスの要求を受け環境を初期化する // 2個目以降のウィンドウからは生成済みの環境を返す if (this.initialized) { return this.instance; } // 以降のコードはアプリケーション起動後に一度だけ通過する env = this.instance; // アプリケーション全体で、同じloadSubScripts関数を使いまわし汚染を防ぐ env.loadSubScripts = loadSubScripts; env.loadAllSubScripts = loadAllSubScripts; env.getContentDir = getContentDir; env.getLibraries = getLibraries; env.PID = this.PID; env.CID = this.CID; env.NAME = this.NAME; // ここでwindowやdocumentなどをenvに持ってくる setupEnvironment(env); // MochiKit内部で使用しているinstanceofで異常が発生するのを避ける env.MochiKit = {}; // for twttr env.twttr = env.window.twttr = {}; // libraryとパッチを読み込む env.loadAllSubScripts(); /* ここから他拡張用の処理 */ GM_Tombloo = copy({ Tombloo : { Service : copy( {}, env.Tombloo.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); GM_Tombfix = copy({ Tombfix : { Service : 
copy( {}, env.Tombfix.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); for (let modelName in env.Models) { if (env.Models.hasOwnProperty(modelName)) { GM_Tombfix[modelName] = GM_Tombloo[modelName] = copy( {}, env.Models[modelName], /^(?!.*(password|cookie))/i ); } } // 他拡張からの読み取りを許可する(Firefox 17用) exposeProperties(GM_Tombloo, true); exposeProperties(GM_Tombfix, true); // Scriptishサンドボックスの拡張 try { let scope = Cu.import('resource://scriptish/api.js', {}); scope.GM_API.prototype.GM_Tombloo = GM_Tombloo; scope.GM_API.prototype.GM_Tombfix = GM_Tombfix; } catch (err) { /* インストールされていない場合や無効になっている場合にエラーになる */ } /* 他拡張用の処理ここまで */ // 以降は初期化の最終処理 env.signal(env, 'environment-load'); this.initialized = true; return env; } }; // http://mxr.mozilla.org/mozilla-central/source/xpcom/components/nsIModule.idl ModuleImpl = { registerSelf : function registerSelf(compMgr, fileSpec, location, type) { compMgr.QueryInterface(Ci.nsIComponentRegistrar) .registerFactoryLocation( Module.CID, Module.NAME, Module.PID, fileSpec, location, type ); if (Module.onRegister) { Module.onRegister(compMgr, fileSpec, location, type); } }, canUnload : function canUnload() { return true; }, getClassObject : function getClassObject(compMgr, cid, iid) { if (!cid.equals(Module.CID)) { throw Cr.NS_ERROR_NO_INTERFACE; } if (!iid.equals(Ci.nsIFactory)) { throw Cr.NS_ERROR_NOT_IMPLEMENTED; } if (Module.onInit) { Module.onInit(compMgr, cid, iid); } return this.factory; }, factory : { createInstance: function createInstance(outer, iid) { var obj; if (outer != null) { throw Cr.NS_ERROR_NO_AGGREGATION; } obj = Module.createInstance(outer, iid); obj.Module = Module; obj.wrappedJSObject = obj; return obj; } } }; // https://developer.mozilla.org/en-US/docs/Mozilla/XPCOM/XPCOM_changes_in_Gecko_2.0#JavaScript_components global.NSGetFactory = function NSGetFactory() { return ModuleImpl.factory; }; }(this));
('disableAllScrip
identifier_name
tombfix.js
/* jshint camelcase:false, nomen:false, latedef:false, forin:false */ /* jshint maxparams:4 */ /* global Components */ (function executeTombfixService(global) { 'use strict'; const EXTENSION_ID = '[email protected]', {interfaces: Ci, classes: Cc, results: Cr, utils: Cu} = Components, // http://mxr.mozilla.org/mozilla-central/source/toolkit/modules/Services.jsm {Services} = Cu.import('resource://gre/modules/Services.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/js/xpconnect/loader/XPCOMUtils.jsm {XPCOMUtils} = Cu.import('resource://gre/modules/XPCOMUtils.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/Console.jsm /* jshint ignore:start */ {console} = Cu.import( 'resource://gre/modules/devtools/Console.jsm', {} ), /* jshint ignore:end */ { appShell: AppShellService, scriptloader: ScriptLoader, wm: WindowMediator } = Services, FileProtocolHandler = getService( 'network/protocol;1?name=file', Ci.nsIFileProtocolHandler ), {nsILocalFile: ILocalFile} = Ci; const SCRIPT_FILES = [ // library/third_party 'MochiKit.js', 'twitter-text.js', // library 'component.js', 'expand.js', 'utility.js', 'tabWatcher.js', 'repository.js', 'models.js', 'Tombfix.Service.js', 'actions.js', 'extractors.js', 'ui.js' ]; // https://developer.mozilla.org/en-US/docs/Components.utils.importGlobalProperties Cu.importGlobalProperties(['File']); var getContentDir, Module, ModuleImpl; // ----[Application]-------------------------------------------- function getScriptFiles(dir) { var scripts = []; simpleIterator(dir.directoryEntries, ILocalFile, file => { if (/\.js$/.test(file.leafName)) { scripts.push(file); } }); return scripts; } function getLibraries() { var libDir, thirdPartyDir, scripts; libDir = getContentDir(); libDir.append('library'); thirdPartyDir = getContentDir(); thirdPartyDir.setRelativeDescriptor(thirdPartyDir, 'library'); thirdPartyDir.append('third_party'); scripts = getScriptFiles(thirdPartyDir).concat(getScriptFiles(libDir)); return SCRIPT_FILES.map(scriptName => { return scripts.find(file => file.leafName === scriptName); }); } function setupEnvironment(env) { var win = AppShellService.hiddenDOMWindow; // 変数/定数はhiddenDOMWindowのものを直接使う [ 'navigator', 'document', 'window', 'screen', 'XMLHttpRequest', 'XPathResult', 'Node', 'Element', 'KeyEvent', 'Event', 'DOMParser', 'XSLTProcessor', 'XMLSerializer', 'URL' ].forEach(propName => { env[propName] = win[propName]; }); // メソッドはthisが変わるとエラーになることがあるためbindして使う [ 'setTimeout', 'setInterval', 'clearTimeout', 'clearInterval', 'open', 'openDialog', 'atob', 'btoa' ].forEach(propName => { env[propName] = win[propName].bind(win); }); // モーダルにするためhiddenDOMWindowdではなく最新のウィンドウのメソッドを使う [ 'alert', 'confirm', 'prompt' ].forEach(propName => { env[propName] = forwardToWindow.bind(null, propName); }); } function forwardToWindow(propName, ...args) { var win = WindowMediator.getMostRecentWindow('navigator:browser'); return win[propName].apply(win, args); } // ----[Utility]-------------------------------------------- /* jshint ignore:start */ function log(msg) { console[typeof msg === 'object' ? 'dir' : 'log'](msg); } /* jshint ignore:end */ function getService(clsName, ifc) { try
ls ? (ifc ? cls.getService(ifc) : cls.getService()) : null; } catch (err) { return null; } } function loadAllSubScripts() { /* jshint validthis:true */ // libraryの読み込み loadSubScripts(getLibraries(), this); if (!this.getPref('disableAllScripts')) { // パッチの読み込み loadSubScripts(getScriptFiles(this.getPatchDir()), this); } } function loadSubScripts(files, global = function () {}) { var now = Date.now(); for (let file of files) { // クエリを付加しキャッシュを避ける ScriptLoader.loadSubScript( FileProtocolHandler.getURLSpecFromFile(file) + '?time=' + now, global, 'UTF-8' ); } } function simpleIterator(directoryEntries, ifc, func) { if (typeof ifc === 'string') { ifc = Ci[ifc]; } try { while (directoryEntries.hasMoreElements()) { let value = directoryEntries.getNext(); func(ifc ? value.QueryInterface(ifc) : value); } } catch (err) {} } function copy(target, obj, re) { for (let propName in obj) { if (!re || re.test(propName)) { target[propName] = obj[propName]; } } return target; } function exposeProperties(obj, recursive) { if (obj == null) { return; } Object.defineProperty(obj, '__exposedProps__', { value : {}, enumerable : false, writable : true, configurable : true }); for (let propName in obj) { obj.__exposedProps__[propName] = 'r'; if (recursive && typeof obj[propName] === 'object') { exposeProperties(obj[propName], true); } } } getContentDir = (function executeFunc() { var {AddonManager} = Cu.import( 'resource://gre/modules/AddonManager.jsm', {} ), dir = null, thread; AddonManager.getAddonByID(EXTENSION_ID, addon => { var target = addon.getResourceURI('/').QueryInterface(Ci.nsIFileURL) .file.QueryInterface(ILocalFile); target.setRelativeDescriptor(target, 'chrome/content'); dir = target; }); // using id:piro (http://piro.sakura.ne.jp/) method thread = getService('thread-manager;1').mainThread; while (dir === null) { thread.processNextEvent(true); } return function getContentDir() { return dir.clone(); }; }()); Module = { CID : Components.ID('{ab5cbd9b-56e1-42e4-8414-2201edb883e7}'), NAME : 'TombfixService', PID : '@tombfix.github.io/tombfix-service;1', initialized : false, onRegister : function onRegister() { XPCOMUtils.categoryManager.addCategoryEntry( 'content-policy', this.NAME, this.PID, true, true ); }, instance : { // http://mxr.mozilla.org/mozilla-central/source/content/base/public/nsIContentPolicy.idl shouldLoad : function shouldLoad() { return Ci.nsIContentPolicy.ACCEPT; }, shouldProcess : function shouldProcess() { return Ci.nsIContentPolicy.ACCEPT; }, QueryInterface : function queryInterface(iid) { if ( iid.equals(Ci.nsIContentPolicy) || iid.equals(Ci.nsISupports) || iid.equals(Ci.nsISupportsWeakReference) ) { return this; } throw Cr.NS_NOINTERFACE; } }, createInstance : function initialize(outer, iid) { var env, GM_Tombloo, GM_Tombfix; // nsIContentPolicyはhiddenDOMWindowの準備ができる前に取得される // 仮に応答できるオブジェクトを返し環境を構築できるまでの代替とする if (iid.equals(Ci.nsIContentPolicy)) { return this.instance; } // ブラウザが開かれるタイミングでインスタンスの要求を受け環境を初期化する // 2個目以降のウィンドウからは生成済みの環境を返す if (this.initialized) { return this.instance; } // 以降のコードはアプリケーション起動後に一度だけ通過する env = this.instance; // アプリケーション全体で、同じloadSubScripts関数を使いまわし汚染を防ぐ env.loadSubScripts = loadSubScripts; env.loadAllSubScripts = loadAllSubScripts; env.getContentDir = getContentDir; env.getLibraries = getLibraries; env.PID = this.PID; env.CID = this.CID; env.NAME = this.NAME; // ここでwindowやdocumentなどをenvに持ってくる setupEnvironment(env); // MochiKit内部で使用しているinstanceofで異常が発生するのを避ける env.MochiKit = {}; // for twttr env.twttr = env.window.twttr = {}; // libraryとパッチを読み込む 
env.loadAllSubScripts(); /* ここから他拡張用の処理 */ GM_Tombloo = copy({ Tombloo : { Service : copy( {}, env.Tombloo.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); GM_Tombfix = copy({ Tombfix : { Service : copy( {}, env.Tombfix.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); for (let modelName in env.Models) { if (env.Models.hasOwnProperty(modelName)) { GM_Tombfix[modelName] = GM_Tombloo[modelName] = copy( {}, env.Models[modelName], /^(?!.*(password|cookie))/i ); } } // 他拡張からの読み取りを許可する(Firefox 17用) exposeProperties(GM_Tombloo, true); exposeProperties(GM_Tombfix, true); // Scriptishサンドボックスの拡張 try { let scope = Cu.import('resource://scriptish/api.js', {}); scope.GM_API.prototype.GM_Tombloo = GM_Tombloo; scope.GM_API.prototype.GM_Tombfix = GM_Tombfix; } catch (err) { /* インストールされていない場合や無効になっている場合にエラーになる */ } /* 他拡張用の処理ここまで */ // 以降は初期化の最終処理 env.signal(env, 'environment-load'); this.initialized = true; return env; } }; // http://mxr.mozilla.org/mozilla-central/source/xpcom/components/nsIModule.idl ModuleImpl = { registerSelf : function registerSelf(compMgr, fileSpec, location, type) { compMgr.QueryInterface(Ci.nsIComponentRegistrar) .registerFactoryLocation( Module.CID, Module.NAME, Module.PID, fileSpec, location, type ); if (Module.onRegister) { Module.onRegister(compMgr, fileSpec, location, type); } }, canUnload : function canUnload() { return true; }, getClassObject : function getClassObject(compMgr, cid, iid) { if (!cid.equals(Module.CID)) { throw Cr.NS_ERROR_NO_INTERFACE; } if (!iid.equals(Ci.nsIFactory)) { throw Cr.NS_ERROR_NOT_IMPLEMENTED; } if (Module.onInit) { Module.onInit(compMgr, cid, iid); } return this.factory; }, factory : { createInstance: function createInstance(outer, iid) { var obj; if (outer != null) { throw Cr.NS_ERROR_NO_AGGREGATION; } obj = Module.createInstance(outer, iid); obj.Module = Module; obj.wrappedJSObject = obj; return obj; } } }; // https://developer.mozilla.org/en-US/docs/Mozilla/XPCOM/XPCOM_changes_in_Gecko_2.0#JavaScript_components global.NSGetFactory = function NSGetFactory() { return ModuleImpl.factory; }; }(this));
{ let cls = Cc['@mozilla.org/' + clsName]; return c
identifier_body
tombfix.js
/* jshint camelcase:false, nomen:false, latedef:false, forin:false */ /* jshint maxparams:4 */ /* global Components */ (function executeTombfixService(global) { 'use strict'; const EXTENSION_ID = '[email protected]', {interfaces: Ci, classes: Cc, results: Cr, utils: Cu} = Components, // http://mxr.mozilla.org/mozilla-central/source/toolkit/modules/Services.jsm {Services} = Cu.import('resource://gre/modules/Services.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/js/xpconnect/loader/XPCOMUtils.jsm {XPCOMUtils} = Cu.import('resource://gre/modules/XPCOMUtils.jsm', {}), // http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/Console.jsm /* jshint ignore:start */ {console} = Cu.import( 'resource://gre/modules/devtools/Console.jsm', {} ), /* jshint ignore:end */ { appShell: AppShellService, scriptloader: ScriptLoader, wm: WindowMediator } = Services, FileProtocolHandler = getService( 'network/protocol;1?name=file', Ci.nsIFileProtocolHandler ), {nsILocalFile: ILocalFile} = Ci; const SCRIPT_FILES = [ // library/third_party 'MochiKit.js', 'twitter-text.js', // library 'component.js', 'expand.js', 'utility.js', 'tabWatcher.js', 'repository.js', 'models.js', 'Tombfix.Service.js', 'actions.js', 'extractors.js', 'ui.js' ]; // https://developer.mozilla.org/en-US/docs/Components.utils.importGlobalProperties Cu.importGlobalProperties(['File']); var getContentDir, Module, ModuleImpl; // ----[Application]-------------------------------------------- function getScriptFiles(dir) { var scripts = []; simpleIterator(dir.directoryEntries, ILocalFile, file => { if (/\.js$/.test(file.leafName))
}); return scripts; } function getLibraries() { var libDir, thirdPartyDir, scripts; libDir = getContentDir(); libDir.append('library'); thirdPartyDir = getContentDir(); thirdPartyDir.setRelativeDescriptor(thirdPartyDir, 'library'); thirdPartyDir.append('third_party'); scripts = getScriptFiles(thirdPartyDir).concat(getScriptFiles(libDir)); return SCRIPT_FILES.map(scriptName => { return scripts.find(file => file.leafName === scriptName); }); } function setupEnvironment(env) { var win = AppShellService.hiddenDOMWindow; // 変数/定数はhiddenDOMWindowのものを直接使う [ 'navigator', 'document', 'window', 'screen', 'XMLHttpRequest', 'XPathResult', 'Node', 'Element', 'KeyEvent', 'Event', 'DOMParser', 'XSLTProcessor', 'XMLSerializer', 'URL' ].forEach(propName => { env[propName] = win[propName]; }); // メソッドはthisが変わるとエラーになることがあるためbindして使う [ 'setTimeout', 'setInterval', 'clearTimeout', 'clearInterval', 'open', 'openDialog', 'atob', 'btoa' ].forEach(propName => { env[propName] = win[propName].bind(win); }); // モーダルにするためhiddenDOMWindowdではなく最新のウィンドウのメソッドを使う [ 'alert', 'confirm', 'prompt' ].forEach(propName => { env[propName] = forwardToWindow.bind(null, propName); }); } function forwardToWindow(propName, ...args) { var win = WindowMediator.getMostRecentWindow('navigator:browser'); return win[propName].apply(win, args); } // ----[Utility]-------------------------------------------- /* jshint ignore:start */ function log(msg) { console[typeof msg === 'object' ? 'dir' : 'log'](msg); } /* jshint ignore:end */ function getService(clsName, ifc) { try { let cls = Cc['@mozilla.org/' + clsName]; return cls ? (ifc ? cls.getService(ifc) : cls.getService()) : null; } catch (err) { return null; } } function loadAllSubScripts() { /* jshint validthis:true */ // libraryの読み込み loadSubScripts(getLibraries(), this); if (!this.getPref('disableAllScripts')) { // パッチの読み込み loadSubScripts(getScriptFiles(this.getPatchDir()), this); } } function loadSubScripts(files, global = function () {}) { var now = Date.now(); for (let file of files) { // クエリを付加しキャッシュを避ける ScriptLoader.loadSubScript( FileProtocolHandler.getURLSpecFromFile(file) + '?time=' + now, global, 'UTF-8' ); } } function simpleIterator(directoryEntries, ifc, func) { if (typeof ifc === 'string') { ifc = Ci[ifc]; } try { while (directoryEntries.hasMoreElements()) { let value = directoryEntries.getNext(); func(ifc ? 
value.QueryInterface(ifc) : value); } } catch (err) {} } function copy(target, obj, re) { for (let propName in obj) { if (!re || re.test(propName)) { target[propName] = obj[propName]; } } return target; } function exposeProperties(obj, recursive) { if (obj == null) { return; } Object.defineProperty(obj, '__exposedProps__', { value : {}, enumerable : false, writable : true, configurable : true }); for (let propName in obj) { obj.__exposedProps__[propName] = 'r'; if (recursive && typeof obj[propName] === 'object') { exposeProperties(obj[propName], true); } } } getContentDir = (function executeFunc() { var {AddonManager} = Cu.import( 'resource://gre/modules/AddonManager.jsm', {} ), dir = null, thread; AddonManager.getAddonByID(EXTENSION_ID, addon => { var target = addon.getResourceURI('/').QueryInterface(Ci.nsIFileURL) .file.QueryInterface(ILocalFile); target.setRelativeDescriptor(target, 'chrome/content'); dir = target; }); // using id:piro (http://piro.sakura.ne.jp/) method thread = getService('thread-manager;1').mainThread; while (dir === null) { thread.processNextEvent(true); } return function getContentDir() { return dir.clone(); }; }()); Module = { CID : Components.ID('{ab5cbd9b-56e1-42e4-8414-2201edb883e7}'), NAME : 'TombfixService', PID : '@tombfix.github.io/tombfix-service;1', initialized : false, onRegister : function onRegister() { XPCOMUtils.categoryManager.addCategoryEntry( 'content-policy', this.NAME, this.PID, true, true ); }, instance : { // http://mxr.mozilla.org/mozilla-central/source/content/base/public/nsIContentPolicy.idl shouldLoad : function shouldLoad() { return Ci.nsIContentPolicy.ACCEPT; }, shouldProcess : function shouldProcess() { return Ci.nsIContentPolicy.ACCEPT; }, QueryInterface : function queryInterface(iid) { if ( iid.equals(Ci.nsIContentPolicy) || iid.equals(Ci.nsISupports) || iid.equals(Ci.nsISupportsWeakReference) ) { return this; } throw Cr.NS_NOINTERFACE; } }, createInstance : function initialize(outer, iid) { var env, GM_Tombloo, GM_Tombfix; // nsIContentPolicyはhiddenDOMWindowの準備ができる前に取得される // 仮に応答できるオブジェクトを返し環境を構築できるまでの代替とする if (iid.equals(Ci.nsIContentPolicy)) { return this.instance; } // ブラウザが開かれるタイミングでインスタンスの要求を受け環境を初期化する // 2個目以降のウィンドウからは生成済みの環境を返す if (this.initialized) { return this.instance; } // 以降のコードはアプリケーション起動後に一度だけ通過する env = this.instance; // アプリケーション全体で、同じloadSubScripts関数を使いまわし汚染を防ぐ env.loadSubScripts = loadSubScripts; env.loadAllSubScripts = loadAllSubScripts; env.getContentDir = getContentDir; env.getLibraries = getLibraries; env.PID = this.PID; env.CID = this.CID; env.NAME = this.NAME; // ここでwindowやdocumentなどをenvに持ってくる setupEnvironment(env); // MochiKit内部で使用しているinstanceofで異常が発生するのを避ける env.MochiKit = {}; // for twttr env.twttr = env.window.twttr = {}; // libraryとパッチを読み込む env.loadAllSubScripts(); /* ここから他拡張用の処理 */ GM_Tombloo = copy({ Tombloo : { Service : copy( {}, env.Tombloo.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); GM_Tombfix = copy({ Tombfix : { Service : copy( {}, env.Tombfix.Service, /(check|share|posters|extractors)/ ), }, }, env, /(Deferred|DeferredHash|copyString|notify)/); for (let modelName in env.Models) { if (env.Models.hasOwnProperty(modelName)) { GM_Tombfix[modelName] = GM_Tombloo[modelName] = copy( {}, env.Models[modelName], /^(?!.*(password|cookie))/i ); } } // 他拡張からの読み取りを許可する(Firefox 17用) exposeProperties(GM_Tombloo, true); exposeProperties(GM_Tombfix, true); // Scriptishサンドボックスの拡張 try { let scope = Cu.import('resource://scriptish/api.js', {}); 
scope.GM_API.prototype.GM_Tombloo = GM_Tombloo; scope.GM_API.prototype.GM_Tombfix = GM_Tombfix; } catch (err) { /* インストールされていない場合や無効になっている場合にエラーになる */ } /* 他拡張用の処理ここまで */ // 以降は初期化の最終処理 env.signal(env, 'environment-load'); this.initialized = true; return env; } }; // http://mxr.mozilla.org/mozilla-central/source/xpcom/components/nsIModule.idl ModuleImpl = { registerSelf : function registerSelf(compMgr, fileSpec, location, type) { compMgr.QueryInterface(Ci.nsIComponentRegistrar) .registerFactoryLocation( Module.CID, Module.NAME, Module.PID, fileSpec, location, type ); if (Module.onRegister) { Module.onRegister(compMgr, fileSpec, location, type); } }, canUnload : function canUnload() { return true; }, getClassObject : function getClassObject(compMgr, cid, iid) { if (!cid.equals(Module.CID)) { throw Cr.NS_ERROR_NO_INTERFACE; } if (!iid.equals(Ci.nsIFactory)) { throw Cr.NS_ERROR_NOT_IMPLEMENTED; } if (Module.onInit) { Module.onInit(compMgr, cid, iid); } return this.factory; }, factory : { createInstance: function createInstance(outer, iid) { var obj; if (outer != null) { throw Cr.NS_ERROR_NO_AGGREGATION; } obj = Module.createInstance(outer, iid); obj.Module = Module; obj.wrappedJSObject = obj; return obj; } } }; // https://developer.mozilla.org/en-US/docs/Mozilla/XPCOM/XPCOM_changes_in_Gecko_2.0#JavaScript_components global.NSGetFactory = function NSGetFactory() { return ModuleImpl.factory; }; }(this));
{ scripts.push(file); }
conditional_block
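The conditional_block record above hides only the body of the `if` inside getScriptFiles: the hole is the push of a matching file into the accumulator. The same filter-and-collect idea, outside XPCOM, looks roughly like the Python sketch below; the directory path in the usage comment is hypothetical.

from pathlib import Path

def get_script_files(directory):
    """Collect the .js files directly inside `directory`, as getScriptFiles() does."""
    return [entry for entry in Path(directory).iterdir()
            if entry.is_file() and entry.suffix == ".js"]

# Hypothetical usage:
# scripts = get_script_files("chrome/content/library")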
game.py
""" A space game written in Python. """ # Standard imports. import pygame import os import sys # Local imports. import components import drawing import ecs import input_handling import physics import resource import systems import utils class SpaceGameServices(ecs.GameServices): """ The services exposed to the entities. This is separate from the game class itself to try and keep control of the interface - since this is basically global state you can get at from anywhere. """ def __init__(self, game):
def get_renderer(self): return self.game.renderer def get_entity_manager(self): """ Return the entity manager. """ return self.game.entity_manager def get_resource_loader(self): """ Get the resource loader. """ return self.game.resource_loader def get_info(self): """ Return the information. """ return self.info def end_game(self): """ Stop the game from running. """ self.game.stop_running() def get_debug_level(self): """ Return the debug level. """ return self.debug_level def load(self): """ Load the game. """ self.game.load() def save(self): """ Save the game. """ self.game.save() def toggle_pause(self): """ Pause the game. """ self.game.toggle_pause() def step(self): """ Simulate one frame and then pause. """ self.game.step() class Game(object): """ Class glueing all of the building blocks together into an actual game. """ def __init__(self): """ Initialise the game systems. """ # Change directory into the directory above this file - the # one containng the 'res' tree. Note that if we've been built via # py2exe, we will actually be in a zip file so account for that. path = os.path.dirname(os.path.dirname(__file__)) if (os.path.basename(path) == "library.zip"): path = os.path.dirname(path) os.chdir( path ) sys.path += ["."] # Services exposed to the entities. self.game_services = SpaceGameServices(self) # The resource loader. self.resource_loader = resource.ResourceLoader() # The configuration. if os.path.isfile("./config.txt"): self.config = self.resource_loader.load_config_file_from("./config.txt") else: self.config = self.resource_loader.load_config_file("base_config.txt") # Create the renderer. renderer_name = self.config.get_or_default("renderer", "src.pygame_renderer.PygameRenderer") renderer_class = utils.lookup_type(renderer_name) screen_size = (self.config.get_or_default("screen_width", 1024), self.config.get_or_default("screen_height", 768)) self.renderer = renderer_class(screen_size, self.config, data_path="./res") # The resource loaded needs a renderer to load images etc. self.resource_loader.set_renderer(self.renderer) # The input handling system. self.input_handling = None # The enemy. self.wave_spawner = None # Create the entity manager. self.entity_manager = ecs.EntityManager(self.game_services) # Configure the resource loader. self.resource_loader.set_minimise_image_loading( self.config.get_or_default("minimise_image_loading", False) ) # The drawing visitor. self.drawing = drawing.Drawing(self.game_services) # Is the game running? self.running = False # Should we load the game? self.want_load = False # Should we pause the game? self.want_pause = False # Should we unpause the game? self.want_resume = False # Should we simulate one frame and then pause? self.want_step = False def stop_running(self): """ Stop the game from running. """ self.running = False def run(self): """ The game loop. This performs initialisation including setting up pygame, and shows a loading screen while certain resources are preloaded. Then, we enter the game loop wherein we remain until the game is over. """ # Initialise the pygame display. pygame.init() pygame.mixer.init() self.renderer.initialise() # Create the game systems. 
self.entity_manager.register_component_system(physics.Physics()) self.entity_manager.register_component_system(systems.FollowsTrackedSystem()) self.entity_manager.register_component_system(systems.TrackingSystem()) self.entity_manager.register_component_system(systems.LaunchesFightersSystem()) self.entity_manager.register_component_system(systems.KillOnTimerSystem()) self.entity_manager.register_component_system(systems.PowerSystem()) self.entity_manager.register_component_system(systems.ShieldSystem()) self.entity_manager.register_component_system(systems.TextSystem()) self.entity_manager.register_component_system(systems.AnimSystem()) self.entity_manager.register_component_system(systems.ThrusterSystem()) self.entity_manager.register_component_system(systems.ThrustersSystem()) self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) self.entity_manager.register_component_system(systems.CameraSystem()) self.entity_manager.register_component_system(systems.TurretSystem()) self.entity_manager.register_component_system(systems.TurretsSystem()) self.entity_manager.register_component_system(systems.WeaponSystem()) # Preload certain images. self.resource_loader.preload() # Make the camera. camera = self.entity_manager.create_entity_with(components.Camera, components.Body, components.Tracking, components.FollowsTracked) camera.get_component(components.FollowsTracked).follow_type = "instant" # Draw debug info if requested. self.game_services.debug_level = self.config.get_or_default("debug", 0) # Make the player player = self.entity_manager.create_entity("player.txt") camera.get_component(components.Tracking).tracked.entity = player # Create a view to pass to the input handling - this lets it map between # world and screen coordinates. view = drawing.CameraView(self.renderer, camera) # Make the input handling system. self.input_handling = input_handling.InputHandling(view, self.game_services) # Create the wave spawner. if not self.config.get_or_default("peaceful_mode", False): self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) # Make it so that bullets can damage things. self.entity_manager.get_system(physics.Physics).add_collision_handler( DamageCollisionHandler() ) # Set the scrolling background. self.drawing.set_background("res/images/857-tileable-classic-nebula-space-patterns/6.jpg") # Run the game loop. self.running = True fps = 60 clock = pygame.time.Clock() tick_time = 1.0/fps while self.running: # Has a load been requested? if self.want_load: self.entity_manager.load(open("space_game.save", "r")) self.want_load = False ## Create any queued objects self.entity_manager.create_queued_objects() # If a pause has been scheduled then pause the game. if self.want_pause: self.want_pause = False self.entity_manager.pause() # If an unpause has been scheduled then unpause the game. if self.want_resume: self.want_resume = False self.entity_manager.unpause() # If a step has been scheduled then advance a frame and schedule a # pause. if self.want_step: self.entity_manager.unpause() self.want_pause = True self.want_step = False # Input for e in pygame.event.get(): response = self.input_handling.handle_input(e) if response.quit_requested: self.running = False # Update the systems. self.entity_manager.update(tick_time) # Draw self.renderer.pre_render(view) self.drawing.draw(view) self.renderer.post_render() self.renderer.flip_buffers() # Maintain frame rate. clock.tick(fps) # Remember how long the frame took. 
limited_fps = 1.0/(clock.get_time() / 1000.0) raw_fps = 1.0/(clock.get_rawtime() / 1000.0) time_ratio = (1.0/fps) / (clock.get_time()/1000.0) self.game_services.info.update_framerate(limited_fps, raw_fps, time_ratio) # Finalise pygame.quit() def load(self): """ Schedule a load. """ self.want_load = True def save(self): """ Save the game. """ self.entity_manager.save(open("space_game.save", "w")) def toggle_pause(self): """ Schedule a pause. """ if self.entity_manager.paused(): self.want_resume = True else: self.want_pause = True def step(self): """ Schedule a step. """ self.want_step = True class DamageCollisionHandler(physics.CollisionHandler): """ Collision handler to apply bullet damage. """ def __init__(self): """ Constructor. """ # Match entities that cause damage on contact to entities that can be # damaged. physics.CollisionHandler.__init__( self, components.DamageOnContact, components.Hitpoints ) def handle_matching_collision(self, dmg, hp): """ Apply the logical effect of the collision and return the result. """ # Delegate to the function in 'systems'. systems.handle_damage_collision(dmg, hp) # Return the result ( we handled the collision. ) return physics.CollisionResult(True, True)
self.game = game self.info = ecs.GameInfo() self.debug_level = 0
identifier_body
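This identifier_body record hides the whole constructor body of SpaceGameServices. A useful sanity check for records of this shape is that the three text fields concatenate back to the original file; the sketch below assumes the conventional prefix/middle/suffix field names and is an illustration, not part of the dump itself.

def reassemble(sample: dict) -> str:
    """Rebuild the original source text from a fill-in-the-middle record."""
    return sample["prefix"] + sample["middle"] + sample["suffix"]

def round_trips(sample: dict, original_text: str) -> bool:
    """True when prefix + middle + suffix reproduce the file the record was cut from."""
    return reassemble(sample) == original_text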
game.py
""" A space game written in Python. """ # Standard imports. import pygame import os import sys # Local imports. import components import drawing import ecs import input_handling import physics import resource import systems import utils class SpaceGameServices(ecs.GameServices): """ The services exposed to the entities. This is separate from the game class itself to try and keep control of the interface - since this is basically global state you can get at from anywhere. """ def __init__(self, game): self.game = game self.info = ecs.GameInfo() self.debug_level = 0 def get_renderer(self): return self.game.renderer def get_entity_manager(self): """ Return the entity manager. """ return self.game.entity_manager def get_resource_loader(self): """ Get the resource loader. """ return self.game.resource_loader def get_info(self): """ Return the information. """ return self.info def end_game(self): """ Stop the game from running. """ self.game.stop_running() def get_debug_level(self): """ Return the debug level. """ return self.debug_level def load(self): """ Load the game. """ self.game.load() def save(self): """ Save the game. """ self.game.save() def toggle_pause(self): """ Pause the game. """ self.game.toggle_pause() def step(self): """ Simulate one frame and then pause. """ self.game.step() class Game(object): """ Class glueing all of the building blocks together into an actual game. """ def __init__(self): """ Initialise the game systems. """ # Change directory into the directory above this file - the # one containng the 'res' tree. Note that if we've been built via # py2exe, we will actually be in a zip file so account for that. path = os.path.dirname(os.path.dirname(__file__)) if (os.path.basename(path) == "library.zip"): path = os.path.dirname(path) os.chdir( path ) sys.path += ["."] # Services exposed to the entities. self.game_services = SpaceGameServices(self) # The resource loader. self.resource_loader = resource.ResourceLoader() # The configuration. if os.path.isfile("./config.txt"): self.config = self.resource_loader.load_config_file_from("./config.txt") else: self.config = self.resource_loader.load_config_file("base_config.txt") # Create the renderer. renderer_name = self.config.get_or_default("renderer", "src.pygame_renderer.PygameRenderer") renderer_class = utils.lookup_type(renderer_name) screen_size = (self.config.get_or_default("screen_width", 1024), self.config.get_or_default("screen_height", 768)) self.renderer = renderer_class(screen_size, self.config, data_path="./res") # The resource loaded needs a renderer to load images etc. self.resource_loader.set_renderer(self.renderer) # The input handling system. self.input_handling = None # The enemy. self.wave_spawner = None # Create the entity manager. self.entity_manager = ecs.EntityManager(self.game_services) # Configure the resource loader. self.resource_loader.set_minimise_image_loading( self.config.get_or_default("minimise_image_loading", False) ) # The drawing visitor. self.drawing = drawing.Drawing(self.game_services) # Is the game running? self.running = False # Should we load the game? self.want_load = False # Should we pause the game? self.want_pause = False # Should we unpause the game? self.want_resume = False # Should we simulate one frame and then pause? self.want_step = False def stop_running(self): """ Stop the game from running. """ self.running = False def run(self): """ The game loop. This performs initialisation including setting up pygame, and shows a loading screen while certain resources are preloaded. 
Then, we enter the game loop wherein we remain until the game is over. """ # Initialise the pygame display. pygame.init() pygame.mixer.init() self.renderer.initialise() # Create the game systems. self.entity_manager.register_component_system(physics.Physics()) self.entity_manager.register_component_system(systems.FollowsTrackedSystem()) self.entity_manager.register_component_system(systems.TrackingSystem()) self.entity_manager.register_component_system(systems.LaunchesFightersSystem()) self.entity_manager.register_component_system(systems.KillOnTimerSystem()) self.entity_manager.register_component_system(systems.PowerSystem()) self.entity_manager.register_component_system(systems.ShieldSystem()) self.entity_manager.register_component_system(systems.TextSystem()) self.entity_manager.register_component_system(systems.AnimSystem()) self.entity_manager.register_component_system(systems.ThrusterSystem()) self.entity_manager.register_component_system(systems.ThrustersSystem()) self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) self.entity_manager.register_component_system(systems.CameraSystem()) self.entity_manager.register_component_system(systems.TurretSystem()) self.entity_manager.register_component_system(systems.TurretsSystem()) self.entity_manager.register_component_system(systems.WeaponSystem()) # Preload certain images. self.resource_loader.preload() # Make the camera. camera = self.entity_manager.create_entity_with(components.Camera, components.Body, components.Tracking, components.FollowsTracked) camera.get_component(components.FollowsTracked).follow_type = "instant" # Draw debug info if requested. self.game_services.debug_level = self.config.get_or_default("debug", 0) # Make the player player = self.entity_manager.create_entity("player.txt") camera.get_component(components.Tracking).tracked.entity = player # Create a view to pass to the input handling - this lets it map between # world and screen coordinates. view = drawing.CameraView(self.renderer, camera) # Make the input handling system. self.input_handling = input_handling.InputHandling(view, self.game_services) # Create the wave spawner. if not self.config.get_or_default("peaceful_mode", False): self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) # Make it so that bullets can damage things. self.entity_manager.get_system(physics.Physics).add_collision_handler( DamageCollisionHandler() ) # Set the scrolling background. self.drawing.set_background("res/images/857-tileable-classic-nebula-space-patterns/6.jpg") # Run the game loop. self.running = True fps = 60 clock = pygame.time.Clock() tick_time = 1.0/fps while self.running: # Has a load been requested? if self.want_load: self.entity_manager.load(open("space_game.save", "r")) self.want_load = False ## Create any queued objects self.entity_manager.create_queued_objects() # If a pause has been scheduled then pause the game. if self.want_pause: self.want_pause = False self.entity_manager.pause() # If an unpause has been scheduled then unpause the game. if self.want_resume: self.want_resume = False self.entity_manager.unpause() # If a step has been scheduled then advance a frame and schedule a # pause. if self.want_step:
# Input for e in pygame.event.get(): response = self.input_handling.handle_input(e) if response.quit_requested: self.running = False # Update the systems. self.entity_manager.update(tick_time) # Draw self.renderer.pre_render(view) self.drawing.draw(view) self.renderer.post_render() self.renderer.flip_buffers() # Maintain frame rate. clock.tick(fps) # Remember how long the frame took. limited_fps = 1.0/(clock.get_time() / 1000.0) raw_fps = 1.0/(clock.get_rawtime() / 1000.0) time_ratio = (1.0/fps) / (clock.get_time()/1000.0) self.game_services.info.update_framerate(limited_fps, raw_fps, time_ratio) # Finalise pygame.quit() def load(self): """ Schedule a load. """ self.want_load = True def save(self): """ Save the game. """ self.entity_manager.save(open("space_game.save", "w")) def toggle_pause(self): """ Schedule a pause. """ if self.entity_manager.paused(): self.want_resume = True else: self.want_pause = True def step(self): """ Schedule a step. """ self.want_step = True class DamageCollisionHandler(physics.CollisionHandler): """ Collision handler to apply bullet damage. """ def __init__(self): """ Constructor. """ # Match entities that cause damage on contact to entities that can be # damaged. physics.CollisionHandler.__init__( self, components.DamageOnContact, components.Hitpoints ) def handle_matching_collision(self, dmg, hp): """ Apply the logical effect of the collision and return the result. """ # Delegate to the function in 'systems'. systems.handle_damage_collision(dmg, hp) # Return the result ( we handled the collision. ) return physics.CollisionResult(True, True)
self.entity_manager.unpause() self.want_pause = True self.want_step = False
conditional_block
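The hole in this record is the single-step logic: a step request unpauses the entity manager for exactly one frame and immediately arms a pause for the next iteration. A minimal standalone sketch of that flag-driven pattern follows; the names are illustrative, not the game's API.

class StepController:
    """Pause/step scheduling in the style of the game loop above."""

    def __init__(self):
        self.paused = False
        self.want_pause = False
        self.want_step = False

    def request_step(self):
        self.want_step = True

    def begin_frame(self):
        """Apply queued requests before simulating the next frame."""
        if self.want_pause:
            self.want_pause = False
            self.paused = True
        if self.want_step:
            self.paused = False     # run this one frame...
            self.want_pause = True  # ...then pause again on the next
            self.want_step = False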
game.py
""" A space game written in Python. """ # Standard imports. import pygame import os import sys # Local imports. import components import drawing import ecs import input_handling import physics import resource import systems import utils class SpaceGameServices(ecs.GameServices): """ The services exposed to the entities. This is separate from the game class itself to try and keep control of the interface - since this is basically global state you can get at from anywhere. """ def __init__(self, game): self.game = game self.info = ecs.GameInfo() self.debug_level = 0 def get_renderer(self): return self.game.renderer def get_entity_manager(self): """ Return the entity manager. """ return self.game.entity_manager def get_resource_loader(self): """ Get the resource loader. """ return self.game.resource_loader def get_info(self): """ Return the information. """ return self.info def end_game(self): """ Stop the game from running. """ self.game.stop_running() def get_debug_level(self): """ Return the debug level. """ return self.debug_level def load(self): """ Load the game. """ self.game.load() def save(self): """ Save the game. """ self.game.save() def toggle_pause(self): """ Pause the game. """ self.game.toggle_pause() def step(self): """ Simulate one frame and then pause. """ self.game.step() class Game(object): """ Class glueing all of the building blocks together into an actual game. """ def __init__(self): """ Initialise the game systems. """ # Change directory into the directory above this file - the # one containng the 'res' tree. Note that if we've been built via # py2exe, we will actually be in a zip file so account for that. path = os.path.dirname(os.path.dirname(__file__)) if (os.path.basename(path) == "library.zip"): path = os.path.dirname(path) os.chdir( path ) sys.path += ["."] # Services exposed to the entities. self.game_services = SpaceGameServices(self) # The resource loader. self.resource_loader = resource.ResourceLoader() # The configuration. if os.path.isfile("./config.txt"): self.config = self.resource_loader.load_config_file_from("./config.txt") else: self.config = self.resource_loader.load_config_file("base_config.txt") # Create the renderer. renderer_name = self.config.get_or_default("renderer", "src.pygame_renderer.PygameRenderer") renderer_class = utils.lookup_type(renderer_name) screen_size = (self.config.get_or_default("screen_width", 1024), self.config.get_or_default("screen_height", 768)) self.renderer = renderer_class(screen_size, self.config, data_path="./res") # The resource loaded needs a renderer to load images etc. self.resource_loader.set_renderer(self.renderer) # The input handling system. self.input_handling = None # The enemy. self.wave_spawner = None # Create the entity manager. self.entity_manager = ecs.EntityManager(self.game_services) # Configure the resource loader. self.resource_loader.set_minimise_image_loading( self.config.get_or_default("minimise_image_loading", False) ) # The drawing visitor. self.drawing = drawing.Drawing(self.game_services) # Is the game running? self.running = False # Should we load the game? self.want_load = False # Should we pause the game? self.want_pause = False # Should we unpause the game? self.want_resume = False # Should we simulate one frame and then pause? self.want_step = False def stop_running(self): """ Stop the game from running. """ self.running = False def run(self): """ The game loop. This performs initialisation including setting up pygame, and shows a loading screen while certain resources are preloaded. 
Then, we enter the game loop wherein we remain until the game is over. """ # Initialise the pygame display. pygame.init() pygame.mixer.init() self.renderer.initialise() # Create the game systems. self.entity_manager.register_component_system(physics.Physics()) self.entity_manager.register_component_system(systems.FollowsTrackedSystem()) self.entity_manager.register_component_system(systems.TrackingSystem()) self.entity_manager.register_component_system(systems.LaunchesFightersSystem()) self.entity_manager.register_component_system(systems.KillOnTimerSystem()) self.entity_manager.register_component_system(systems.PowerSystem()) self.entity_manager.register_component_system(systems.ShieldSystem()) self.entity_manager.register_component_system(systems.TextSystem()) self.entity_manager.register_component_system(systems.AnimSystem()) self.entity_manager.register_component_system(systems.ThrusterSystem()) self.entity_manager.register_component_system(systems.ThrustersSystem()) self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) self.entity_manager.register_component_system(systems.CameraSystem()) self.entity_manager.register_component_system(systems.TurretSystem()) self.entity_manager.register_component_system(systems.TurretsSystem()) self.entity_manager.register_component_system(systems.WeaponSystem()) # Preload certain images. self.resource_loader.preload() # Make the camera. camera = self.entity_manager.create_entity_with(components.Camera, components.Body, components.Tracking, components.FollowsTracked) camera.get_component(components.FollowsTracked).follow_type = "instant" # Draw debug info if requested. self.game_services.debug_level = self.config.get_or_default("debug", 0) # Make the player player = self.entity_manager.create_entity("player.txt") camera.get_component(components.Tracking).tracked.entity = player # Create a view to pass to the input handling - this lets it map between # world and screen coordinates. view = drawing.CameraView(self.renderer, camera) # Make the input handling system. self.input_handling = input_handling.InputHandling(view, self.game_services) # Create the wave spawner. if not self.config.get_or_default("peaceful_mode", False): self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) # Make it so that bullets can damage things. self.entity_manager.get_system(physics.Physics).add_collision_handler( DamageCollisionHandler() ) # Set the scrolling background. self.drawing.set_background("res/images/857-tileable-classic-nebula-space-patterns/6.jpg") # Run the game loop. self.running = True fps = 60 clock = pygame.time.Clock() tick_time = 1.0/fps while self.running: # Has a load been requested? if self.want_load: self.entity_manager.load(open("space_game.save", "r")) self.want_load = False ## Create any queued objects self.entity_manager.create_queued_objects() # If a pause has been scheduled then pause the game. if self.want_pause: self.want_pause = False self.entity_manager.pause() # If an unpause has been scheduled then unpause the game. if self.want_resume: self.want_resume = False self.entity_manager.unpause() # If a step has been scheduled then advance a frame and schedule a # pause. if self.want_step: self.entity_manager.unpause() self.want_pause = True self.want_step = False # Input for e in pygame.event.get(): response = self.input_handling.handle_input(e) if response.quit_requested: self.running = False # Update the systems. 
self.entity_manager.update(tick_time) # Draw self.renderer.pre_render(view) self.drawing.draw(view) self.renderer.post_render() self.renderer.flip_buffers()
limited_fps = 1.0/(clock.get_time() / 1000.0) raw_fps = 1.0/(clock.get_rawtime() / 1000.0) time_ratio = (1.0/fps) / (clock.get_time()/1000.0) self.game_services.info.update_framerate(limited_fps, raw_fps, time_ratio) # Finalise pygame.quit() def load(self): """ Schedule a load. """ self.want_load = True def save(self): """ Save the game. """ self.entity_manager.save(open("space_game.save", "w")) def toggle_pause(self): """ Schedule a pause. """ if self.entity_manager.paused(): self.want_resume = True else: self.want_pause = True def step(self): """ Schedule a step. """ self.want_step = True class DamageCollisionHandler(physics.CollisionHandler): """ Collision handler to apply bullet damage. """ def __init__(self): """ Constructor. """ # Match entities that cause damage on contact to entities that can be # damaged. physics.CollisionHandler.__init__( self, components.DamageOnContact, components.Hitpoints ) def handle_matching_collision(self, dmg, hp): """ Apply the logical effect of the collision and return the result. """ # Delegate to the function in 'systems'. systems.handle_damage_collision(dmg, hp) # Return the result ( we handled the collision. ) return physics.CollisionResult(True, True)
# Maintain frame rate. clock.tick(fps) # Remember how long the frame took.
random_line_split
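The random_line_split hole here is just the clock.tick(fps) call, but the bookkeeping right after it deserves a worked example. pygame's Clock.get_time() reports the full duration of the previous frame including the delay added by tick(), while get_rawtime() excludes that delay. With made-up numbers:

fps = 60
frame_ms = 20     # hypothetical clock.get_time(): whole frame, including the tick() wait
raw_ms = 5        # hypothetical clock.get_rawtime(): the frame's actual work

limited_fps = 1.0 / (frame_ms / 1000.0)          # 50.0 fps actually achieved
raw_fps = 1.0 / (raw_ms / 1000.0)                # 200.0 fps the work alone would allow
time_ratio = (1.0 / fps) / (frame_ms / 1000.0)   # ~0.83: running below the 60 fps target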
game.py
""" A space game written in Python. """ # Standard imports. import pygame import os import sys # Local imports. import components import drawing import ecs import input_handling import physics import resource import systems import utils class SpaceGameServices(ecs.GameServices): """ The services exposed to the entities. This is separate from the game class itself to try and keep control of the interface - since this is basically global state you can get at from anywhere. """ def __init__(self, game): self.game = game self.info = ecs.GameInfo() self.debug_level = 0 def get_renderer(self): return self.game.renderer def
(self): """ Return the entity manager. """ return self.game.entity_manager def get_resource_loader(self): """ Get the resource loader. """ return self.game.resource_loader def get_info(self): """ Return the information. """ return self.info def end_game(self): """ Stop the game from running. """ self.game.stop_running() def get_debug_level(self): """ Return the debug level. """ return self.debug_level def load(self): """ Load the game. """ self.game.load() def save(self): """ Save the game. """ self.game.save() def toggle_pause(self): """ Pause the game. """ self.game.toggle_pause() def step(self): """ Simulate one frame and then pause. """ self.game.step() class Game(object): """ Class glueing all of the building blocks together into an actual game. """ def __init__(self): """ Initialise the game systems. """ # Change directory into the directory above this file - the # one containng the 'res' tree. Note that if we've been built via # py2exe, we will actually be in a zip file so account for that. path = os.path.dirname(os.path.dirname(__file__)) if (os.path.basename(path) == "library.zip"): path = os.path.dirname(path) os.chdir( path ) sys.path += ["."] # Services exposed to the entities. self.game_services = SpaceGameServices(self) # The resource loader. self.resource_loader = resource.ResourceLoader() # The configuration. if os.path.isfile("./config.txt"): self.config = self.resource_loader.load_config_file_from("./config.txt") else: self.config = self.resource_loader.load_config_file("base_config.txt") # Create the renderer. renderer_name = self.config.get_or_default("renderer", "src.pygame_renderer.PygameRenderer") renderer_class = utils.lookup_type(renderer_name) screen_size = (self.config.get_or_default("screen_width", 1024), self.config.get_or_default("screen_height", 768)) self.renderer = renderer_class(screen_size, self.config, data_path="./res") # The resource loaded needs a renderer to load images etc. self.resource_loader.set_renderer(self.renderer) # The input handling system. self.input_handling = None # The enemy. self.wave_spawner = None # Create the entity manager. self.entity_manager = ecs.EntityManager(self.game_services) # Configure the resource loader. self.resource_loader.set_minimise_image_loading( self.config.get_or_default("minimise_image_loading", False) ) # The drawing visitor. self.drawing = drawing.Drawing(self.game_services) # Is the game running? self.running = False # Should we load the game? self.want_load = False # Should we pause the game? self.want_pause = False # Should we unpause the game? self.want_resume = False # Should we simulate one frame and then pause? self.want_step = False def stop_running(self): """ Stop the game from running. """ self.running = False def run(self): """ The game loop. This performs initialisation including setting up pygame, and shows a loading screen while certain resources are preloaded. Then, we enter the game loop wherein we remain until the game is over. """ # Initialise the pygame display. pygame.init() pygame.mixer.init() self.renderer.initialise() # Create the game systems. 
self.entity_manager.register_component_system(physics.Physics()) self.entity_manager.register_component_system(systems.FollowsTrackedSystem()) self.entity_manager.register_component_system(systems.TrackingSystem()) self.entity_manager.register_component_system(systems.LaunchesFightersSystem()) self.entity_manager.register_component_system(systems.KillOnTimerSystem()) self.entity_manager.register_component_system(systems.PowerSystem()) self.entity_manager.register_component_system(systems.ShieldSystem()) self.entity_manager.register_component_system(systems.TextSystem()) self.entity_manager.register_component_system(systems.AnimSystem()) self.entity_manager.register_component_system(systems.ThrusterSystem()) self.entity_manager.register_component_system(systems.ThrustersSystem()) self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) self.entity_manager.register_component_system(systems.CameraSystem()) self.entity_manager.register_component_system(systems.TurretSystem()) self.entity_manager.register_component_system(systems.TurretsSystem()) self.entity_manager.register_component_system(systems.WeaponSystem()) # Preload certain images. self.resource_loader.preload() # Make the camera. camera = self.entity_manager.create_entity_with(components.Camera, components.Body, components.Tracking, components.FollowsTracked) camera.get_component(components.FollowsTracked).follow_type = "instant" # Draw debug info if requested. self.game_services.debug_level = self.config.get_or_default("debug", 0) # Make the player player = self.entity_manager.create_entity("player.txt") camera.get_component(components.Tracking).tracked.entity = player # Create a view to pass to the input handling - this lets it map between # world and screen coordinates. view = drawing.CameraView(self.renderer, camera) # Make the input handling system. self.input_handling = input_handling.InputHandling(view, self.game_services) # Create the wave spawner. if not self.config.get_or_default("peaceful_mode", False): self.entity_manager.register_component_system(systems.WaveSpawnerSystem()) # Make it so that bullets can damage things. self.entity_manager.get_system(physics.Physics).add_collision_handler( DamageCollisionHandler() ) # Set the scrolling background. self.drawing.set_background("res/images/857-tileable-classic-nebula-space-patterns/6.jpg") # Run the game loop. self.running = True fps = 60 clock = pygame.time.Clock() tick_time = 1.0/fps while self.running: # Has a load been requested? if self.want_load: self.entity_manager.load(open("space_game.save", "r")) self.want_load = False ## Create any queued objects self.entity_manager.create_queued_objects() # If a pause has been scheduled then pause the game. if self.want_pause: self.want_pause = False self.entity_manager.pause() # If an unpause has been scheduled then unpause the game. if self.want_resume: self.want_resume = False self.entity_manager.unpause() # If a step has been scheduled then advance a frame and schedule a # pause. if self.want_step: self.entity_manager.unpause() self.want_pause = True self.want_step = False # Input for e in pygame.event.get(): response = self.input_handling.handle_input(e) if response.quit_requested: self.running = False # Update the systems. self.entity_manager.update(tick_time) # Draw self.renderer.pre_render(view) self.drawing.draw(view) self.renderer.post_render() self.renderer.flip_buffers() # Maintain frame rate. clock.tick(fps) # Remember how long the frame took. 
limited_fps = 1.0/(clock.get_time() / 1000.0) raw_fps = 1.0/(clock.get_rawtime() / 1000.0) time_ratio = (1.0/fps) / (clock.get_time()/1000.0) self.game_services.info.update_framerate(limited_fps, raw_fps, time_ratio) # Finalise pygame.quit() def load(self): """ Schedule a load. """ self.want_load = True def save(self): """ Save the game. """ self.entity_manager.save(open("space_game.save", "w")) def toggle_pause(self): """ Schedule a pause. """ if self.entity_manager.paused(): self.want_resume = True else: self.want_pause = True def step(self): """ Schedule a step. """ self.want_step = True class DamageCollisionHandler(physics.CollisionHandler): """ Collision handler to apply bullet damage. """ def __init__(self): """ Constructor. """ # Match entities that cause damage on contact to entities that can be # damaged. physics.CollisionHandler.__init__( self, components.DamageOnContact, components.Hitpoints ) def handle_matching_collision(self, dmg, hp): """ Apply the logical effect of the collision and return the result. """ # Delegate to the function in 'systems'. systems.handle_damage_collision(dmg, hp) # Return the result ( we handled the collision. ) return physics.CollisionResult(True, True)
get_entity_manager
identifier_name
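This identifier_name record only hides the method name get_entity_manager, but the configuration code nearby is worth one more sketch: Game.__init__ resolves the renderer class from a dotted string via utils.lookup_type. The real helper is not shown in the dump; a typical implementation, offered here as an assumption rather than the project's actual code, looks like this.

import importlib

def lookup_type(dotted_name: str):
    """Resolve 'package.module.ClassName' to the class object it names."""
    module_name, _, attr = dotted_name.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, attr)

# e.g. renderer_class = lookup_type("src.pygame_renderer.PygameRenderer")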
hwdetect.rs
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1}; use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. 
pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>> { let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if !has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if !has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var)
} gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn read_linux_memory() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) .map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
{ if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if !has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } }
conditional_block
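The conditional_block hole above is the parsing of one GPU environment variable: split the value on commas and refuse lists that contain duplicates. A Python sketch of the same intent follows; it mirrors detect_gpus_from_env loosely, not the Rust types or logging.

import os

def gpu_devices_from_env(var: str = "CUDA_VISIBLE_DEVICES"):
    """Return the device labels listed in `var`, or [] if unset or duplicated."""
    raw = os.environ.get(var)
    if raw is None:
        return []
    devices = [part.strip() for part in raw.split(",") if part.strip()]
    if len(devices) != len(set(devices)):
        return []  # duplicates are rejected, as the Rust code warns and skips
    return devices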
hwdetect.rs
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1}; use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>>
/// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var) { if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if !has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn read_linux_memory() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) 
.map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
{ let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if !has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if !has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) }
identifier_body
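Several functions in this file feed /sys cpulist strings such as "2-7,10-12,20" through parse_range; the unit tests at the bottom of the file spell out the expected expansions. The sketch below reproduces that grammar in Python (single ids or closed ranges, commas between items, stray spaces tolerated); it raises on malformed input instead of returning a nom error.

def parse_cpulist(text: str):
    """Expand e.g. '2-7,10-12,20' into [2, 3, 4, 5, 6, 7, 10, 11, 12, 20]."""
    ids = []
    for item in text.strip().split(","):
        lo, _, hi = item.strip().partition("-")
        start = int(lo)
        end = int(hi) if hi else start
        ids.extend(range(start, end + 1))
    return ids

# parse_cpulist("0-3")      -> [0, 1, 2, 3]
# parse_cpulist("2,7, 10")  -> [2, 7, 10]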
hwdetect.rs
use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. 
pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>> { let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if !has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if !has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var) { if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if !has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn read_linux_memory() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. 
fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) .map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
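`parse_range` above (and its tests) accept the Linux cpulist syntax used by files such as `/sys/devices/system/node/node*/cpulist`: comma-separated entries that are either a single index or an inclusive `lo-hi` range, with optional spaces and an optional trailing newline. A minimal sketch of the same grammar without the `nom` combinators follows; the function name and error type are invented for the example.

```rust
/// Parse a Linux cpulist such as "0-3,7,10-12\n" into CPU indices.
/// Mirrors the grammar accepted by `parse_range` above, but with plain std.
fn parse_cpulist(input: &str) -> Result<Vec<u32>, String> {
    let mut cpus = Vec::new();
    for entry in input.trim().split(',') {
        let entry = entry.trim();
        match entry.split_once('-') {
            // An inclusive range like "10-12".
            Some((lo, hi)) => {
                let lo: u32 = lo.trim().parse().map_err(|e| format!("{entry}: {e}"))?;
                let hi: u32 = hi.trim().parse().map_err(|e| format!("{entry}: {e}"))?;
                cpus.extend(lo..=hi);
            }
            // A single index like "7".
            None => cpus.push(entry.parse().map_err(|e| format!("{entry}: {e}"))?),
        }
    }
    Ok(cpus)
}

fn main() {
    assert_eq!(
        parse_cpulist("2-7,10-12,20").unwrap(),
        vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20]
    );
    assert_eq!(parse_cpulist("2,7, 10\n").unwrap(), vec![2, 7, 10]);
    assert!(parse_cpulist("0-1-2").is_err());
}
```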
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1};
random_line_split
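The same file's `prune_hyper_threading` keeps a logical CPU only when it is the smallest member of its own `thread_siblings_list`, so each physical core contributes exactly one entry. A simplified, std-only sketch of that rule under the usual sysfs layout (the helper name and the error handling are assumptions for the sketch):

```rust
use std::fs;

/// Keep one logical CPU per physical core: a CPU survives only when it is the
/// numerically smallest entry of its own thread_siblings_list, which is the
/// rule applied by prune_hyper_threading above.
fn prune_hyper_threads(cpus: &[u32]) -> std::io::Result<Vec<u32>> {
    let mut kept = Vec::new();
    for &cpu in cpus {
        let path = format!("/sys/devices/system/cpu/cpu{cpu}/topology/thread_siblings_list");
        let siblings = fs::read_to_string(&path)?;
        // The sibling list is itself a cpulist, e.g. "0,64" or "0-1".
        let min_sibling = siblings
            .trim()
            .split(|c: char| c == ',' || c == '-')
            .filter_map(|s| s.trim().parse::<u32>().ok())
            .min();
        if min_sibling == Some(cpu) {
            kept.push(cpu);
        }
    }
    Ok(kept)
}

fn main() -> std::io::Result<()> {
    let kept = prune_hyper_threads(&[0, 1, 2, 3])?;
    println!("CPUs kept after pruning hyper-threads: {kept:?}");
    Ok(())
}
```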
hwdetect.rs
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1}; use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count() != numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. 
pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>> { let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if !has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if !has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var) { if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if !has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn
() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) .map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c != ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
read_linux_memory
identifier_name
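The `identifier_name` row above restores the name `read_linux_memory`, but the surrounding file also shows the environment-variable path for GPUs: `detect_gpus_from_env` splits `CUDA_VISIBLE_DEVICES` / `ROCR_VISIBLE_DEVICES` on commas and refuses lists with duplicate entries. A self-contained sketch of that splitting and uniqueness check, without the `ResourceDescriptorKind` wrapping (empty entries are simply skipped here, whereas the real parser rejects them):

```rust
use std::collections::HashSet;
use std::env;

/// Split a "visible devices" variable such as CUDA_VISIBLE_DEVICES or
/// ROCR_VISIBLE_DEVICES into device labels and reject duplicate entries,
/// following the checks performed by detect_gpus_from_env above.
fn devices_from_env(var: &str) -> Option<Vec<String>> {
    let value = env::var(var).ok()?;
    let devices: Vec<String> = value
        .split(',')
        .map(|part| part.trim().to_string())
        .filter(|part| !part.is_empty())
        .collect();
    let unique: HashSet<&String> = devices.iter().collect();
    if unique.len() != devices.len() {
        eprintln!("{var} contains duplicates ({value})");
        return None;
    }
    if devices.is_empty() {
        return None;
    }
    Some(devices)
}

fn main() {
    for var in ["CUDA_VISIBLE_DEVICES", "ROCR_VISIBLE_DEVICES"] {
        if let Some(devices) = devices_from_env(var) {
            println!("Detected GPUs {devices:?} from `{var}`");
        }
    }
}
```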
server.rs
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // This file implements a server that can handle multiple connections. use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, ZeroRttChecker, }; pub use crate::addr_valid::ValidateAddress; use crate::addr_valid::{AddressValidation, AddressValidationResult}; use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}; use crate::connection::{Connection, Output, State}; use crate::packet::{PacketBuilder, PacketType, PublicPacket}; use crate::{ConnectionParameters, Res, Version}; use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; use std::fs::OpenOptions; use std::mem; use std::net::SocketAddr; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::rc::{Rc, Weak}; use std::time::{Duration, Instant}; pub enum InitialResult { Accept, Drop, Retry(Vec<u8>), } /// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. This is higher than the actual timer granularity /// as this depends on there being some distribution of events. const TIMER_GRANULARITY: Duration = Duration::from_millis(4); /// The number of buckets in the timer. As mentioned in the definition of `Timer`, /// the granularity and capacity need to multiply to be larger than the largest /// delay that might be used. That's the idle timeout (currently 30s). const TIMER_CAPACITY: usize = 16384; type StateRef = Rc<RefCell<ServerConnectionState>>; type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>; #[derive(Debug)] pub struct ServerConnectionState { c: Connection, active_attempt: Option<AttemptKey>, last_timer: Instant, } impl Deref for ServerConnectionState { type Target = Connection; fn deref(&self) -> &Self::Target { &self.c } } impl DerefMut for ServerConnectionState { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.c } } /// A `AttemptKey` is used to disambiguate connection attempts. /// Multiple connection attempts with the same key won't produce multiple connections. #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct AttemptKey { // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. 
struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. 
pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ =>
} if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v| !Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
// All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { // TODO send a stateless reset here. qtrace!([self], "Short header packet for an unknown connection"); return None; } if packet.packet_type() == PacketType::OtherVersion || (packet.packet_type() == PacketType::Initial && !self .conn_params .get_versions() .all() .contains(&packet.version().unwrap())) { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Unsupported version: too short"); return None; } qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( packet.scid(), packet.dcid(), packet.wire_version(), self.conn_params.get_versions().all(), ); return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); } match packet.packet_type() { PacketType::Initial => { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Drop initial: too short"); return None; } // Copy values from `packet` because they are currently still borrowing from `dgram`. let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } PacketType::ZeroRtt => { let dcid = ConnectionId::from(packet.dcid()); self.handle_0rtt(dgram, dcid, now) } PacketType::OtherVersion => unreachable!(), _ => { qtrace!([self], "Not an initial packet"); None } } } /// Iterate through the pending connections looking for any that might want /// to send a datagram. Stop at the first one that does. fn process_next_output(&mut self, now: Instant) -> Option<Datagram> { qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } None } fn next_time(&mut self, now: Instant) -> Option<Duration> { if self.waiting.is_empty() { self.timers.next_time().map(|x| x - now) } else { Some(Duration::new(0, 0)) } } pub fn process(&mut self, dgram: Option<Datagram>, now: Instant) -> Output { let out = if let Some(d) = dgram { self.process_input(d, now) } else { None }; let out = out.or_else(|| self.process_next_output(now)); match out { Some(d) => { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } _ => match self.next_time(now) { Some(delay) => { qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) } _ => { qtrace!([self], "Go dormant"); Output::None } }, } } /// This lists the connections that have received new events /// as a result of calling `process()`. 
pub fn active_connections(&mut self) -> Vec<ActiveConnectionRef> { mem::take(&mut self.active).into_iter().collect() } pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) { self.waiting.push_back(c.connection()); } } #[derive(Clone, Debug)] pub struct ActiveConnectionRef { c: StateRef, } impl ActiveConnectionRef { pub fn borrow(&self) -> impl Deref<Target = Connection> + '_ { std::cell::Ref::map(self.c.borrow(), |c| &c.c) } pub fn borrow_mut(&mut self) -> impl DerefMut<Target = Connection> + '_ { std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c) } pub fn connection(&self) -> StateRef { Rc::clone(&self.c) } } impl std::hash::Hash for ActiveConnectionRef { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { let ptr: *const _ = self.c.as_ref(); ptr.hash(state) } } impl PartialEq for ActiveConnectionRef { fn eq(&self, other: &Self) -> bool { Rc::ptr_eq(&self.c, &other.c) } } impl Eq for ActiveConnectionRef {} struct ServerConnectionIdGenerator { c: Weak<RefCell<ServerConnectionState>>, connections: ConnectionTableRef, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, saved_cids: Vec<ConnectionId>, } impl ServerConnectionIdGenerator { pub fn set_connection(&mut self, c: StateRef) { let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0)); for cid in saved { qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid); self.insert_cid(cid, Rc::clone(&c)); } self.c = Rc::downgrade(&c); } fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) { debug_assert!(!cid.is_empty()); self.connections.borrow_mut().insert(cid, rc); } } impl ConnectionIdDecoder for ServerConnectionIdGenerator { fn decode_cid<'a>(&self, dec: &mut Decoder<'a>) -> Option<ConnectionIdRef<'a>> { self.cid_generator.borrow_mut().decode_cid(dec) } } impl ConnectionIdGenerator for ServerConnectionIdGenerator { fn generate_cid(&mut self) -> Option<ConnectionId> { let maybe_cid = self.cid_generator.borrow_mut().generate_cid(); if let Some(cid) = maybe_cid { if let Some(rc) = self.c.upgrade() { self.insert_cid(cid.clone(), rc); } else { // This function can be called before the connection is set. // So save any connection IDs until that hookup happens. qtrace!("ServerConnectionIdGenerator saving cid {}", cid); self.saved_cids.push(cid.clone()); } Some(cid) } else { None } } fn as_decoder(&self) -> &dyn ConnectionIdDecoder { self } } impl ::std::fmt::Display for Server { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Server") } }
{ self.remove_timer(&c); }
conditional_block
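The `conditional_block` row above fills in the fallback arm of `process_connection` that clears a connection's timer. Elsewhere in the same file, `connection_attempt` routes Initial packets by an `AttemptKey` of remote address plus original destination connection ID, so retransmitted Initials reach the attempt that was accepted first. A toy model of that routing idea is sketched below; connection IDs are reduced to byte vectors and connections to plain strings, none of which mirrors neqo's real types.

```rust
use std::collections::HashMap;
use std::net::SocketAddr;

/// A simplified attempt key: remote address plus original destination CID.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct AttemptKey {
    remote_address: SocketAddr,
    odcid: Vec<u8>,
}

#[derive(Default)]
struct AttemptTable {
    active_attempts: HashMap<AttemptKey, String>,
}

impl AttemptTable {
    /// Return the connection for this attempt, creating one on first sight.
    fn route_initial(&mut self, remote: SocketAddr, odcid: &[u8]) -> &String {
        let key = AttemptKey {
            remote_address: remote,
            odcid: odcid.to_vec(),
        };
        self.active_attempts
            .entry(key)
            .or_insert_with(|| format!("connection for {remote}"))
    }
}

fn main() {
    let mut table = AttemptTable::default();
    let client: SocketAddr = "192.0.2.7:4433".parse().unwrap();
    let first = table.route_initial(client, b"odcid-1").clone();
    let retransmit = table.route_initial(client, b"odcid-1").clone();
    // The retransmitted Initial lands on the same connection attempt.
    assert_eq!(first, retransmit);
    assert_eq!(table.active_attempts.len(), 1);
}
```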
server.rs
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // This file implements a server that can handle multiple connections. use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, ZeroRttChecker, }; pub use crate::addr_valid::ValidateAddress; use crate::addr_valid::{AddressValidation, AddressValidationResult}; use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}; use crate::connection::{Connection, Output, State}; use crate::packet::{PacketBuilder, PacketType, PublicPacket}; use crate::{ConnectionParameters, Res, Version}; use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; use std::fs::OpenOptions; use std::mem; use std::net::SocketAddr; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::rc::{Rc, Weak}; use std::time::{Duration, Instant}; pub enum InitialResult { Accept, Drop, Retry(Vec<u8>), } /// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports.
/// as this depends on there being some distribution of events. const TIMER_GRANULARITY: Duration = Duration::from_millis(4); /// The number of buckets in the timer. As mentioned in the definition of `Timer`, /// the granularity and capacity need to multiply to be larger than the largest /// delay that might be used. That's the idle timeout (currently 30s). const TIMER_CAPACITY: usize = 16384; type StateRef = Rc<RefCell<ServerConnectionState>>; type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>; #[derive(Debug)] pub struct ServerConnectionState { c: Connection, active_attempt: Option<AttemptKey>, last_timer: Instant, } impl Deref for ServerConnectionState { type Target = Connection; fn deref(&self) -> &Self::Target { &self.c } } impl DerefMut for ServerConnectionState { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.c } } /// A `AttemptKey` is used to disambiguate connection attempts. /// Multiple connection attempts with the same key won't produce multiple connections. #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct AttemptKey { // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. 
active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. 
pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ => { self.remove_timer(&c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v| !Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } 
else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. 
fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. // All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { // TODO send a stateless reset here. qtrace!([self], "Short header packet for an unknown connection"); return None; } if packet.packet_type() == PacketType::OtherVersion || (packet.packet_type() == PacketType::Initial && !self .conn_params .get_versions() .all() .contains(&packet.version().unwrap())) { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Unsupported version: too short"); return None; } qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( packet.scid(), packet.dcid(), packet.wire_version(), self.conn_params.get_versions().all(), ); return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); } match packet.packet_type() { PacketType::Initial => { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Drop initial: too short"); return None; } // Copy values from `packet` because they are currently still borrowing from `dgram`. let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } PacketType::ZeroRtt => { let dcid = ConnectionId::from(packet.dcid()); self.handle_0rtt(dgram, dcid, now) } PacketType::OtherVersion => unreachable!(), _ => { qtrace!([self], "Not an initial packet"); None } } } /// Iterate through the pending connections looking for any that might want /// to send a datagram. Stop at the first one that does. 
fn process_next_output(&mut self, now: Instant) -> Option<Datagram> { qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } None } fn next_time(&mut self, now: Instant) -> Option<Duration> { if self.waiting.is_empty() { self.timers.next_time().map(|x| x - now) } else { Some(Duration::new(0, 0)) } } pub fn process(&mut self, dgram: Option<Datagram>, now: Instant) -> Output { let out = if let Some(d) = dgram { self.process_input(d, now) } else { None }; let out = out.or_else(|| self.process_next_output(now)); match out { Some(d) => { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } _ => match self.next_time(now) { Some(delay) => { qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) } _ => { qtrace!([self], "Go dormant"); Output::None } }, } } /// This lists the connections that have received new events /// as a result of calling `process()`. pub fn active_connections(&mut self) -> Vec<ActiveConnectionRef> { mem::take(&mut self.active).into_iter().collect() } pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) { self.waiting.push_back(c.connection()); } } #[derive(Clone, Debug)] pub struct ActiveConnectionRef { c: StateRef, } impl ActiveConnectionRef { pub fn borrow(&self) -> impl Deref<Target = Connection> + '_ { std::cell::Ref::map(self.c.borrow(), |c| &c.c) } pub fn borrow_mut(&mut self) -> impl DerefMut<Target = Connection> + '_ { std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c) } pub fn connection(&self) -> StateRef { Rc::clone(&self.c) } } impl std::hash::Hash for ActiveConnectionRef { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { let ptr: *const _ = self.c.as_ref(); ptr.hash(state) } } impl PartialEq for ActiveConnectionRef { fn eq(&self, other: &Self) -> bool { Rc::ptr_eq(&self.c, &other.c) } } impl Eq for ActiveConnectionRef {} struct ServerConnectionIdGenerator { c: Weak<RefCell<ServerConnectionState>>, connections: ConnectionTableRef, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, saved_cids: Vec<ConnectionId>, } impl ServerConnectionIdGenerator { pub fn set_connection(&mut self, c: StateRef) { let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0)); for cid in saved { qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid); self.insert_cid(cid, Rc::clone(&c)); } self.c = Rc::downgrade(&c); } fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) { debug_assert!(!cid.is_empty()); self.connections.borrow_mut().insert(cid, rc); } } impl ConnectionIdDecoder for ServerConnectionIdGenerator { fn decode_cid<'a>(&self, dec: &mut Decoder<'a>) -> Option<ConnectionIdRef<'a>> { self.cid_generator.borrow_mut().decode_cid(dec) } } impl ConnectionIdGenerator for ServerConnectionIdGenerator { fn generate_cid(&mut self) -> Option<ConnectionId> { let maybe_cid = self.cid_generator.borrow_mut().generate_cid(); if let Some(cid) = maybe_cid { if let Some(rc) = self.c.upgrade() { self.insert_cid(cid.clone(), rc); } else { // This function can be called before the connection is set. // So save any connection IDs until that hookup happens. 
qtrace!("ServerConnectionIdGenerator saving cid {}", cid); self.saved_cids.push(cid.clone()); } Some(cid) } else { None } } fn as_decoder(&self) -> &dyn ConnectionIdDecoder { self } } impl ::std::fmt::Display for Server { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Server") } }
const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. This is higher than the actual timer granularity
random_line_split
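The `random_line_split` middle above restores `MIN_INITIAL_PACKET_SIZE` and part of the timer-bucket comment; the surrounding constants size the timer so that granularity times capacity exceeds the largest scheduled delay, the 30 s idle timeout, and 4 ms × 16384 ≈ 65.5 s satisfies that with room to spare. A quick check of the invariant, with the constants re-declared locally for the sketch:

```rust
use std::time::Duration;

// Constants re-declared locally for the sketch; the values match those above.
const TIMER_GRANULARITY: Duration = Duration::from_millis(4);
const TIMER_CAPACITY: u32 = 16_384;
// The largest delay the server schedules is the idle timeout (currently 30 s).
const IDLE_TIMEOUT: Duration = Duration::from_secs(30);

fn main() {
    // The timer wheel only represents delays up to granularity * capacity,
    // so that product must exceed the idle timeout: 4 ms * 16384 = 65.536 s.
    let span = TIMER_GRANULARITY * TIMER_CAPACITY;
    assert!(span > IDLE_TIMEOUT);
    println!("timer span: {span:?}");
}
```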
server.rs
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // This file implements a server that can handle multiple connections. use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, ZeroRttChecker, }; pub use crate::addr_valid::ValidateAddress; use crate::addr_valid::{AddressValidation, AddressValidationResult}; use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}; use crate::connection::{Connection, Output, State}; use crate::packet::{PacketBuilder, PacketType, PublicPacket}; use crate::{ConnectionParameters, Res, Version}; use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; use std::fs::OpenOptions; use std::mem; use std::net::SocketAddr; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::rc::{Rc, Weak}; use std::time::{Duration, Instant}; pub enum InitialResult { Accept, Drop, Retry(Vec<u8>), } /// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. This is higher than the actual timer granularity /// as this depends on there being some distribution of events. const TIMER_GRANULARITY: Duration = Duration::from_millis(4); /// The number of buckets in the timer. As mentioned in the definition of `Timer`, /// the granularity and capacity need to multiply to be larger than the largest /// delay that might be used. That's the idle timeout (currently 30s). const TIMER_CAPACITY: usize = 16384; type StateRef = Rc<RefCell<ServerConnectionState>>; type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>; #[derive(Debug)] pub struct ServerConnectionState { c: Connection, active_attempt: Option<AttemptKey>, last_timer: Instant, } impl Deref for ServerConnectionState { type Target = Connection; fn deref(&self) -> &Self::Target
} impl DerefMut for ServerConnectionState { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.c } } /// A `AttemptKey` is used to disambiguate connection attempts. /// Multiple connection attempts with the same key won't produce multiple connections. #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct AttemptKey { // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. 
/// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ => { self.remove_timer(&c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. 
if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v| !Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
// All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { // TODO send a stateless reset here. qtrace!([self], "Short header packet for an unknown connection"); return None; } if packet.packet_type() == PacketType::OtherVersion || (packet.packet_type() == PacketType::Initial && !self .conn_params .get_versions() .all() .contains(&packet.version().unwrap())) { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Unsupported version: too short"); return None; } qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( packet.scid(), packet.dcid(), packet.wire_version(), self.conn_params.get_versions().all(), ); return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); } match packet.packet_type() { PacketType::Initial => { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Drop initial: too short"); return None; } // Copy values from `packet` because they are currently still borrowing from `dgram`. let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } PacketType::ZeroRtt => { let dcid = ConnectionId::from(packet.dcid()); self.handle_0rtt(dgram, dcid, now) } PacketType::OtherVersion => unreachable!(), _ => { qtrace!([self], "Not an initial packet"); None } } } /// Iterate through the pending connections looking for any that might want /// to send a datagram. Stop at the first one that does. fn process_next_output(&mut self, now: Instant) -> Option<Datagram> { qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } None } fn next_time(&mut self, now: Instant) -> Option<Duration> { if self.waiting.is_empty() { self.timers.next_time().map(|x| x - now) } else { Some(Duration::new(0, 0)) } } pub fn process(&mut self, dgram: Option<Datagram>, now: Instant) -> Output { let out = if let Some(d) = dgram { self.process_input(d, now) } else { None }; let out = out.or_else(|| self.process_next_output(now)); match out { Some(d) => { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } _ => match self.next_time(now) { Some(delay) => { qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) } _ => { qtrace!([self], "Go dormant"); Output::None } }, } } /// This lists the connections that have received new events /// as a result of calling `process()`. 
pub fn active_connections(&mut self) -> Vec<ActiveConnectionRef> { mem::take(&mut self.active).into_iter().collect() } pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) { self.waiting.push_back(c.connection()); } } #[derive(Clone, Debug)] pub struct ActiveConnectionRef { c: StateRef, } impl ActiveConnectionRef { pub fn borrow(&self) -> impl Deref<Target = Connection> + '_ { std::cell::Ref::map(self.c.borrow(), |c| &c.c) } pub fn borrow_mut(&mut self) -> impl DerefMut<Target = Connection> + '_ { std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c) } pub fn connection(&self) -> StateRef { Rc::clone(&self.c) } } impl std::hash::Hash for ActiveConnectionRef { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { let ptr: *const _ = self.c.as_ref(); ptr.hash(state) } } impl PartialEq for ActiveConnectionRef { fn eq(&self, other: &Self) -> bool { Rc::ptr_eq(&self.c, &other.c) } } impl Eq for ActiveConnectionRef {} struct ServerConnectionIdGenerator { c: Weak<RefCell<ServerConnectionState>>, connections: ConnectionTableRef, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, saved_cids: Vec<ConnectionId>, } impl ServerConnectionIdGenerator { pub fn set_connection(&mut self, c: StateRef) { let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0)); for cid in saved { qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid); self.insert_cid(cid, Rc::clone(&c)); } self.c = Rc::downgrade(&c); } fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) { debug_assert!(!cid.is_empty()); self.connections.borrow_mut().insert(cid, rc); } } impl ConnectionIdDecoder for ServerConnectionIdGenerator { fn decode_cid<'a>(&self, dec: &mut Decoder<'a>) -> Option<ConnectionIdRef<'a>> { self.cid_generator.borrow_mut().decode_cid(dec) } } impl ConnectionIdGenerator for ServerConnectionIdGenerator { fn generate_cid(&mut self) -> Option<ConnectionId> { let maybe_cid = self.cid_generator.borrow_mut().generate_cid(); if let Some(cid) = maybe_cid { if let Some(rc) = self.c.upgrade() { self.insert_cid(cid.clone(), rc); } else { // This function can be called before the connection is set. // So save any connection IDs until that hookup happens. qtrace!("ServerConnectionIdGenerator saving cid {}", cid); self.saved_cids.push(cid.clone()); } Some(cid) } else { None } } fn as_decoder(&self) -> &dyn ConnectionIdDecoder { self } } impl ::std::fmt::Display for Server { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Server") } }
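A quick back-of-the-envelope check (editor's sketch, not part of the original source) of the sizing comment on `TIMER_GRANULARITY` and `TIMER_CAPACITY` above: the two must multiply out to more than the largest schedulable delay, i.e. the 30 s idle timeout.

granularity_s = 0.004   # TIMER_GRANULARITY: 4 ms buckets
capacity = 16_384       # TIMER_CAPACITY: number of buckets
span_s = granularity_s * capacity
assert span_s > 30.0, "timer wheel must out-span the idle timeout"
print(span_s)           # 65.536 s, comfortably above 30 s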
{ &self.c }
identifier_body
server.rs
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // This file implements a server that can handle multiple connections. use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, ZeroRttChecker, }; pub use crate::addr_valid::ValidateAddress; use crate::addr_valid::{AddressValidation, AddressValidationResult}; use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}; use crate::connection::{Connection, Output, State}; use crate::packet::{PacketBuilder, PacketType, PublicPacket}; use crate::{ConnectionParameters, Res, Version}; use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; use std::fs::OpenOptions; use std::mem; use std::net::SocketAddr; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::rc::{Rc, Weak}; use std::time::{Duration, Instant}; pub enum InitialResult { Accept, Drop, Retry(Vec<u8>), } /// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. This is higher than the actual timer granularity /// as this depends on there being some distribution of events. const TIMER_GRANULARITY: Duration = Duration::from_millis(4); /// The number of buckets in the timer. As mentioned in the definition of `Timer`, /// the granularity and capacity need to multiply to be larger than the largest /// delay that might be used. That's the idle timeout (currently 30s). const TIMER_CAPACITY: usize = 16384; type StateRef = Rc<RefCell<ServerConnectionState>>; type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>; #[derive(Debug)] pub struct ServerConnectionState { c: Connection, active_attempt: Option<AttemptKey>, last_timer: Instant, } impl Deref for ServerConnectionState { type Target = Connection; fn deref(&self) -> &Self::Target { &self.c } } impl DerefMut for ServerConnectionState { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.c } } /// A `AttemptKey` is used to disambiguate connection attempts. /// Multiple connection attempts with the same key won't produce multiple connections. #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct
{ // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. 
pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ => { self.remove_timer(&c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. 
if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v| !Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
// All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { // TODO send a stateless reset here. qtrace!([self], "Short header packet for an unknown connection"); return None; } if packet.packet_type() == PacketType::OtherVersion || (packet.packet_type() == PacketType::Initial && !self .conn_params .get_versions() .all() .contains(&packet.version().unwrap())) { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Unsupported version: too short"); return None; } qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( packet.scid(), packet.dcid(), packet.wire_version(), self.conn_params.get_versions().all(), ); return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); } match packet.packet_type() { PacketType::Initial => { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Drop initial: too short"); return None; } // Copy values from `packet` because they are currently still borrowing from `dgram`. let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } PacketType::ZeroRtt => { let dcid = ConnectionId::from(packet.dcid()); self.handle_0rtt(dgram, dcid, now) } PacketType::OtherVersion => unreachable!(), _ => { qtrace!([self], "Not an initial packet"); None } } } /// Iterate through the pending connections looking for any that might want /// to send a datagram. Stop at the first one that does. fn process_next_output(&mut self, now: Instant) -> Option<Datagram> { qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { if let Some(d) = self.process_connection(c, None, now) { return Some(d); } } None } fn next_time(&mut self, now: Instant) -> Option<Duration> { if self.waiting.is_empty() { self.timers.next_time().map(|x| x - now) } else { Some(Duration::new(0, 0)) } } pub fn process(&mut self, dgram: Option<Datagram>, now: Instant) -> Output { let out = if let Some(d) = dgram { self.process_input(d, now) } else { None }; let out = out.or_else(|| self.process_next_output(now)); match out { Some(d) => { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } _ => match self.next_time(now) { Some(delay) => { qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) } _ => { qtrace!([self], "Go dormant"); Output::None } }, } } /// This lists the connections that have received new events /// as a result of calling `process()`. 
pub fn active_connections(&mut self) -> Vec<ActiveConnectionRef> { mem::take(&mut self.active).into_iter().collect() } pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) { self.waiting.push_back(c.connection()); } } #[derive(Clone, Debug)] pub struct ActiveConnectionRef { c: StateRef, } impl ActiveConnectionRef { pub fn borrow(&self) -> impl Deref<Target = Connection> + '_ { std::cell::Ref::map(self.c.borrow(), |c| &c.c) } pub fn borrow_mut(&mut self) -> impl DerefMut<Target = Connection> + '_ { std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c) } pub fn connection(&self) -> StateRef { Rc::clone(&self.c) } } impl std::hash::Hash for ActiveConnectionRef { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { let ptr: *const _ = self.c.as_ref(); ptr.hash(state) } } impl PartialEq for ActiveConnectionRef { fn eq(&self, other: &Self) -> bool { Rc::ptr_eq(&self.c, &other.c) } } impl Eq for ActiveConnectionRef {} struct ServerConnectionIdGenerator { c: Weak<RefCell<ServerConnectionState>>, connections: ConnectionTableRef, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, saved_cids: Vec<ConnectionId>, } impl ServerConnectionIdGenerator { pub fn set_connection(&mut self, c: StateRef) { let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0)); for cid in saved { qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid); self.insert_cid(cid, Rc::clone(&c)); } self.c = Rc::downgrade(&c); } fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) { debug_assert!(!cid.is_empty()); self.connections.borrow_mut().insert(cid, rc); } } impl ConnectionIdDecoder for ServerConnectionIdGenerator { fn decode_cid<'a>(&self, dec: &mut Decoder<'a>) -> Option<ConnectionIdRef<'a>> { self.cid_generator.borrow_mut().decode_cid(dec) } } impl ConnectionIdGenerator for ServerConnectionIdGenerator { fn generate_cid(&mut self) -> Option<ConnectionId> { let maybe_cid = self.cid_generator.borrow_mut().generate_cid(); if let Some(cid) = maybe_cid { if let Some(rc) = self.c.upgrade() { self.insert_cid(cid.clone(), rc); } else { // This function can be called before the connection is set. // So save any connection IDs until that hookup happens. qtrace!("ServerConnectionIdGenerator saving cid {}", cid); self.saved_cids.push(cid.clone()); } Some(cid) } else { None } } fn as_decoder(&self) -> &dyn ConnectionIdDecoder { self } } impl ::std::fmt::Display for Server { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Server") } }
AttemptKey
identifier_name
replay.py
import random from collections import deque from environment import get_env import numpy as np import heapq from itertools import count class BufferSizeManager: def __init__(self, initial_capacity, size_change=20): """Adaptive buffer size. If size_change > 1: Linear buffer update as in: https://arxiv.org/pdf/1710.06574.pdf If size_change in [0, 1]: Percentage update. If size_change < 0 then the algorithm works in the inverse manner as described in the paper. You should imagine the buffer manager as a mid-aged fat man that believes his role is key in the success of the company, even though many people think they could do without him.""" self.capacity = initial_capacity self.k = size_change self.td_error = 0 def update_td_error(self, new_td_error): self.td_error = abs(new_td_error) def update_memory_size(self, new_td_error): new_td_error = abs(new_td_error) # update = -1 if new_td_error < self.td_error, then the buffer must decrease; # update = 1 if new_td_error > self.td_error, than the buffer must increase; # update = 0 if new_td_error = self.td_error, buffer size remains constant. delta = new_td_error - self.td_error e = 1e-7 if abs(delta) < e: # for numeric stability return self.capacity update = delta / abs(delta) # allow for non-linear update (not covered in the method proposed by the paper) if abs(self.k) < 1: update *= int(self.capacity * self.k) else: update *= int(self.k) # Update the buffer size self.capacity = max(self.capacity + update, 1) # Update the stored td_error self.update_td_error(new_td_error) return self.capacity class NaiveReplayMemory: def __init__(self, capacity): self.capacity = capacity # List is necessary for dynamic buffer self.memory = [] # deque(maxlen=capacity) def pop(self, idx=0): # Pop is redefined as taking the oldest element (FIFO) for convinience. return self.memory.pop(idx) def memory_full(self): return len(self.memory) >= self.capacity def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) def sample(self, batch_size): return random.sample(self.memory, batch_size) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.capacity = new_size # self.push() takes care of decreasing the memory. # # Oldest experiences are discarded. For Ever. # # TODO: Check for a more efficient way of cleaning the memory. 
# while len(self.memory) > self.capacity: # _ = self.pop() def __len__(self): return len(self.memory) # Add different experience replay methods class CombinedReplayMemory(NaiveReplayMemory): def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) self.last_transition = transition def sample(self, batch_size): samples = random.sample(self.memory, batch_size - 1) samples.append(self.last_transition) return samples class SumTree: # started from https://github.com/wotmd5731/dqn/blob/master/memory.py write = 0 def __init__(self, max_capacity): self.capacity = max_capacity self.tree = np.zeros(2 * max_capacity - 1) # [--------------Parent nodes-------------][-------leaves to recode priority-------] # size: capacity - 1 size: capacity self.data = np.zeros(max_capacity, dtype=object) # for all transitions # [--------------data frame-------------] # size: capacity self.num = 0 self.e = 0.01 # small amount to avoid zero priority self.a = 0.6 # [0~1] convert the importance of TD error to priority def _get_priority(self, error): return (error + self.e) ** self.a def _propagate_old(self, idx, change): parent = (idx - 1) // 2 self.tree[parent] += change if parent != 0: self._propagate(parent, change) def _propagate(self, idx): parent = (idx - 1) // 2 left = parent * 2 + 1 right = parent * 2 + 2 self.tree[parent] = self.tree[right] + self.tree[left] if parent != 0: self._propagate(parent) def _retrieve(self, idx, rand): """ Tree structure and array storage: Tree index: 0 -> storing priority sum / \ 1 2 / \ / \ 3 4 5 6 -> storing priority for transitions Array type for storing: [0,1,2,3,4,5,6] """ left = 2 * idx + 1 right = left + 1 if left >= len(self.tree): # end search when no more child return idx if rand <= self.tree[left]: # downward search, always search for a higher priority node return self._retrieve(left, rand) else: return self._retrieve(right, rand - self.tree[left]) def _total(self): return self.tree[0] # the root def add(self, error, data): idx = self.write + self.capacity - 1 self.data[self.write] = data # update data_frame self.update(idx, error) # update tree_frame self.write += 1 if self.write >= self.capacity: # replace when exceed the capacity self.write = 0 if self.num < self.capacity: self.num += 1 def update(self, idx, error): p = self._get_priority(error) # change = p - self.tree[idx] self.tree[idx] = p self._propagate(idx) def _get_single(self, a, b, rand): #rand = random.uniform(a, b) idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here) data_idx = idx - self.capacity + 1 return idx, self.tree[idx], self.data[data_idx] def get_batch(self, n): batch_idx = [] batch = [] priorities = [] segment = self._total() / n for i in range(n): a = segment * i b = segment * (i + 1) rand = random.uniform(a, b) (idx, p, data) = self._get_single(a, b, rand) if data == 0: (idx, p, data) = self._get_single(a, b, rand) batch.append(data) batch_idx.append(idx) priorities.append(p) if batch[63] == 0: batch = batch return batch, batch_idx, priorities def get_len(self): return self.num class RankBased: def __init__(self, max_capacity): self.capacity = max_capacity self.data = [] self.priorities = None self.total = None self.cum_sum = None self.tiebreaker = count() def memory_full(self): return len(self.data) >= self.capacity def add(self, error, data): # check if there is space left in memory while self.memory_full(): oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0] del 
self.data[oldest_idx] # use tie breaker for transitions with equal error data = (error, next(self.tiebreaker), *data) heapq.heappush(self.data, data) def update(self, idx, error): self.data[idx] = (error, *self.data[idx][1:]) def get_batch(self, n): self._update_priorities() self.total = np.sum(self.priorities) self.cum_sum = np.cumsum(self.priorities) batch = [] priorities = [] # sample hole batch indicies is faster than each individual rands = np.random.rand(n) * self.total batch_idx = np.searchsorted(self.cum_sum, rands) # picking transitions one by one is faster than indixing with a list for idx in batch_idx: batch.append(self.data[idx][2:]) priorities.append(self.priorities[idx]) return batch, batch_idx, priorities def get_len(self): return len(self.data) def _update_priorities(self): # order is inverse of actual position in heap order = np.array(range(self.get_len() + 1, 1, -1)) self.priorities = 1. / order class PrioritizedReplayMemory: # stored as ( s, a, r, s_ ) in SumTree # modified https://github.com/wotmd5731/dqn/blob/master/memory.py def __init__(self, max_capacity, method="prop"): if method == "prop": self.container = SumTree(max_capacity) elif method == "rank":
return self.container.memory_full() def push(self, error, sample): self.container.add(error, sample) def sample(self, n): return self.container.get_batch(n) def update(self, idx, error): self.container.update(idx, error) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.container.capacity = new_size def __len__(self): return self.container.get_len() # sanity check if __name__ == "__main__": capacity = 10 # CombinedReplayMemory(capacity)#NaiveReplayMemory(capacity) memory = PrioritizedReplayMemory(capacity) env, _ = get_env("Acrobot-v1") # Sample a transition s = env.reset() a = env.action_space.sample() s_next, r, done, _ = env.step(a) # Push a transition err = 0.5 memory.push(err, (s, a, r, s_next, done)) # Sample a batch size of 1 print(memory.sample(1))
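The adaptive-buffer idea in `BufferSizeManager` is easiest to see end to end. A minimal sketch (editor's illustration, assuming the classes defined above are in scope; the TD errors are made-up numbers): grow the buffer when the observed |TD error| rises, shrink it when it falls, and let `NaiveReplayMemory.resize_memory` together with `push` evict the oldest transitions.

manager = BufferSizeManager(initial_capacity=100, size_change=20)
memory = NaiveReplayMemory(capacity=manager.capacity)

for step, td_error in enumerate([0.5, 0.7, 0.4, 0.9]):   # pretend per-step TD errors
    transition = (step, 0, 0.0, step + 1, False)          # placeholder (s, a, r, s_next, done)
    memory.push(transition)
    new_capacity = manager.update_memory_size(td_error)   # +k when error grows, -k when it shrinks
    memory.resize_memory(new_capacity)
    print(step, new_capacity, len(memory))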
self.container = RankBased(max_capacity) else: raise ValueError("Bad replay method") def memory_full(self):
random_line_split
replay.py
import random from collections import deque from environment import get_env import numpy as np import heapq from itertools import count class BufferSizeManager: def __init__(self, initial_capacity, size_change=20): """Adaptive buffer size. If size_change > 1: Linear buffer update as in: https://arxiv.org/pdf/1710.06574.pdf If size_change in [0, 1]: Percentage update. If size_change < 0 then the algorithm works in the inverse manner as described in the paper. You should imagine the buffer manager as a mid-aged fat man that believes his role is key in the success of the company, even though many people think they could do without him.""" self.capacity = initial_capacity self.k = size_change self.td_error = 0 def update_td_error(self, new_td_error): self.td_error = abs(new_td_error) def update_memory_size(self, new_td_error): new_td_error = abs(new_td_error) # update = -1 if new_td_error < self.td_error, then the buffer must decrease; # update = 1 if new_td_error > self.td_error, than the buffer must increase; # update = 0 if new_td_error = self.td_error, buffer size remains constant. delta = new_td_error - self.td_error e = 1e-7 if abs(delta) < e: # for numeric stability return self.capacity update = delta / abs(delta) # allow for non-linear update (not covered in the method proposed by the paper) if abs(self.k) < 1: update *= int(self.capacity * self.k) else: update *= int(self.k) # Update the buffer size self.capacity = max(self.capacity + update, 1) # Update the stored td_error self.update_td_error(new_td_error) return self.capacity class NaiveReplayMemory: def __init__(self, capacity): self.capacity = capacity # List is necessary for dynamic buffer self.memory = [] # deque(maxlen=capacity) def pop(self, idx=0): # Pop is redefined as taking the oldest element (FIFO) for convinience. return self.memory.pop(idx) def memory_full(self): return len(self.memory) >= self.capacity def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) def sample(self, batch_size): return random.sample(self.memory, batch_size) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.capacity = new_size # self.push() takes care of decreasing the memory. # # Oldest experiences are discarded. For Ever. # # TODO: Check for a more efficient way of cleaning the memory. 
# while len(self.memory) > self.capacity: # _ = self.pop() def __len__(self): return len(self.memory) # Add different experience replay methods class CombinedReplayMemory(NaiveReplayMemory): def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) self.last_transition = transition def sample(self, batch_size): samples = random.sample(self.memory, batch_size - 1) samples.append(self.last_transition) return samples class SumTree: # started from https://github.com/wotmd5731/dqn/blob/master/memory.py write = 0 def __init__(self, max_capacity): self.capacity = max_capacity self.tree = np.zeros(2 * max_capacity - 1) # [--------------Parent nodes-------------][-------leaves to recode priority-------] # size: capacity - 1 size: capacity self.data = np.zeros(max_capacity, dtype=object) # for all transitions # [--------------data frame-------------] # size: capacity self.num = 0 self.e = 0.01 # small amount to avoid zero priority self.a = 0.6 # [0~1] convert the importance of TD error to priority def _get_priority(self, error): return (error + self.e) ** self.a def _propagate_old(self, idx, change): parent = (idx - 1) // 2 self.tree[parent] += change if parent != 0: self._propagate(parent, change) def _propagate(self, idx): parent = (idx - 1) // 2 left = parent * 2 + 1 right = parent * 2 + 2 self.tree[parent] = self.tree[right] + self.tree[left] if parent != 0: self._propagate(parent) def _retrieve(self, idx, rand): """ Tree structure and array storage: Tree index: 0 -> storing priority sum / \ 1 2 / \ / \ 3 4 5 6 -> storing priority for transitions Array type for storing: [0,1,2,3,4,5,6] """ left = 2 * idx + 1 right = left + 1 if left >= len(self.tree): # end search when no more child return idx if rand <= self.tree[left]: # downward search, always search for a higher priority node return self._retrieve(left, rand) else: return self._retrieve(right, rand - self.tree[left]) def _total(self): return self.tree[0] # the root def add(self, error, data): idx = self.write + self.capacity - 1 self.data[self.write] = data # update data_frame self.update(idx, error) # update tree_frame self.write += 1 if self.write >= self.capacity: # replace when exceed the capacity self.write = 0 if self.num < self.capacity: self.num += 1 def update(self, idx, error): p = self._get_priority(error) # change = p - self.tree[idx] self.tree[idx] = p self._propagate(idx) def _get_single(self, a, b, rand): #rand = random.uniform(a, b) idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here) data_idx = idx - self.capacity + 1 return idx, self.tree[idx], self.data[data_idx] def get_batch(self, n): batch_idx = [] batch = [] priorities = [] segment = self._total() / n for i in range(n): a = segment * i b = segment * (i + 1) rand = random.uniform(a, b) (idx, p, data) = self._get_single(a, b, rand) if data == 0: (idx, p, data) = self._get_single(a, b, rand) batch.append(data) batch_idx.append(idx) priorities.append(p) if batch[63] == 0: batch = batch return batch, batch_idx, priorities def get_len(self): return self.num class RankBased: def __init__(self, max_capacity): self.capacity = max_capacity self.data = [] self.priorities = None self.total = None self.cum_sum = None self.tiebreaker = count() def memory_full(self): return len(self.data) >= self.capacity def add(self, error, data): # check if there is space left in memory while self.memory_full(): oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0] del 
self.data[oldest_idx] # use tie breaker for transitions with equal error data = (error, next(self.tiebreaker), *data) heapq.heappush(self.data, data) def
(self, idx, error): self.data[idx] = (error, *self.data[idx][1:]) def get_batch(self, n): self._update_priorities() self.total = np.sum(self.priorities) self.cum_sum = np.cumsum(self.priorities) batch = [] priorities = [] # sample hole batch indicies is faster than each individual rands = np.random.rand(n) * self.total batch_idx = np.searchsorted(self.cum_sum, rands) # picking transitions one by one is faster than indixing with a list for idx in batch_idx: batch.append(self.data[idx][2:]) priorities.append(self.priorities[idx]) return batch, batch_idx, priorities def get_len(self): return len(self.data) def _update_priorities(self): # order is inverse of actual position in heap order = np.array(range(self.get_len() + 1, 1, -1)) self.priorities = 1. / order class PrioritizedReplayMemory: # stored as ( s, a, r, s_ ) in SumTree # modified https://github.com/wotmd5731/dqn/blob/master/memory.py def __init__(self, max_capacity, method="prop"): if method == "prop": self.container = SumTree(max_capacity) elif method == "rank": self.container = RankBased(max_capacity) else: raise ValueError("Bad replay method") def memory_full(self): return self.container.memory_full() def push(self, error, sample): self.container.add(error, sample) def sample(self, n): return self.container.get_batch(n) def update(self, idx, error): self.container.update(idx, error) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.container.capacity = new_size def __len__(self): return self.container.get_len() # sanity check if __name__ == "__main__": capacity = 10 # CombinedReplayMemory(capacity)#NaiveReplayMemory(capacity) memory = PrioritizedReplayMemory(capacity) env, _ = get_env("Acrobot-v1") # Sample a transition s = env.reset() a = env.action_space.sample() s_next, r, done, _ = env.step(a) # Push a transition err = 0.5 memory.push(err, (s, a, r, s_next, done)) # Sample a batch size of 1 print(memory.sample(1))
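For the proportional variant, the intended loop is: push each transition with an initial error, sample a batch (the returned indices address SumTree leaves), then write the freshly computed |TD errors| back with `update` so future sampling reflects them. A minimal sketch (editor's illustration, assuming the classes above are in scope; the errors are random stand-ins, and the batch size is kept at 64 to stay clear of the hard-coded `batch[63]` check in `SumTree.get_batch`):

import numpy as np

memory = PrioritizedReplayMemory(max_capacity=1000, method="prop")
for i in range(128):
    transition = (i, 0, 0.0, i + 1, False)        # placeholder (s, a, r, s_next, done)
    memory.push(error=1.0, sample=transition)     # new transitions start with a high priority

batch, batch_idx, priorities = memory.sample(64)  # proportional sampling over SumTree leaves
new_errors = np.abs(np.random.randn(64))          # stand-in for |TD error| from a learner
for idx, err in zip(batch_idx, new_errors):
    memory.update(idx, err)                       # re-prioritise what was just replayed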
update
identifier_name
replay.py
import random from collections import deque from environment import get_env import numpy as np import heapq from itertools import count class BufferSizeManager: def __init__(self, initial_capacity, size_change=20): """Adaptive buffer size. If size_change > 1: Linear buffer update as in: https://arxiv.org/pdf/1710.06574.pdf If size_change in [0, 1]: Percentage update. If size_change < 0 then the algorithm works in the inverse manner as described in the paper. You should imagine the buffer manager as a mid-aged fat man that believes his role is key in the success of the company, even though many people think they could do without him.""" self.capacity = initial_capacity self.k = size_change self.td_error = 0 def update_td_error(self, new_td_error): self.td_error = abs(new_td_error) def update_memory_size(self, new_td_error): new_td_error = abs(new_td_error) # update = -1 if new_td_error < self.td_error, then the buffer must decrease; # update = 1 if new_td_error > self.td_error, than the buffer must increase; # update = 0 if new_td_error = self.td_error, buffer size remains constant. delta = new_td_error - self.td_error e = 1e-7 if abs(delta) < e: # for numeric stability return self.capacity update = delta / abs(delta) # allow for non-linear update (not covered in the method proposed by the paper) if abs(self.k) < 1: update *= int(self.capacity * self.k) else: update *= int(self.k) # Update the buffer size self.capacity = max(self.capacity + update, 1) # Update the stored td_error self.update_td_error(new_td_error) return self.capacity class NaiveReplayMemory: def __init__(self, capacity): self.capacity = capacity # List is necessary for dynamic buffer self.memory = [] # deque(maxlen=capacity) def pop(self, idx=0): # Pop is redefined as taking the oldest element (FIFO) for convinience. return self.memory.pop(idx) def memory_full(self): return len(self.memory) >= self.capacity def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) def sample(self, batch_size): return random.sample(self.memory, batch_size) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.capacity = new_size # self.push() takes care of decreasing the memory. # # Oldest experiences are discarded. For Ever. # # TODO: Check for a more efficient way of cleaning the memory. 
# while len(self.memory) > self.capacity: # _ = self.pop() def __len__(self): return len(self.memory) # Add different experience replay methods class CombinedReplayMemory(NaiveReplayMemory): def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) self.last_transition = transition def sample(self, batch_size): samples = random.sample(self.memory, batch_size - 1) samples.append(self.last_transition) return samples class SumTree: # started from https://github.com/wotmd5731/dqn/blob/master/memory.py write = 0 def __init__(self, max_capacity): self.capacity = max_capacity self.tree = np.zeros(2 * max_capacity - 1) # [--------------Parent nodes-------------][-------leaves to recode priority-------] # size: capacity - 1 size: capacity self.data = np.zeros(max_capacity, dtype=object) # for all transitions # [--------------data frame-------------] # size: capacity self.num = 0 self.e = 0.01 # small amount to avoid zero priority self.a = 0.6 # [0~1] convert the importance of TD error to priority def _get_priority(self, error): return (error + self.e) ** self.a def _propagate_old(self, idx, change): parent = (idx - 1) // 2 self.tree[parent] += change if parent != 0: self._propagate(parent, change) def _propagate(self, idx): parent = (idx - 1) // 2 left = parent * 2 + 1 right = parent * 2 + 2 self.tree[parent] = self.tree[right] + self.tree[left] if parent != 0: self._propagate(parent) def _retrieve(self, idx, rand): """ Tree structure and array storage: Tree index: 0 -> storing priority sum / \ 1 2 / \ / \ 3 4 5 6 -> storing priority for transitions Array type for storing: [0,1,2,3,4,5,6] """ left = 2 * idx + 1 right = left + 1 if left >= len(self.tree): # end search when no more child return idx if rand <= self.tree[left]: # downward search, always search for a higher priority node return self._retrieve(left, rand) else: return self._retrieve(right, rand - self.tree[left]) def _total(self): return self.tree[0] # the root def add(self, error, data): idx = self.write + self.capacity - 1 self.data[self.write] = data # update data_frame self.update(idx, error) # update tree_frame self.write += 1 if self.write >= self.capacity: # replace when exceed the capacity self.write = 0 if self.num < self.capacity: self.num += 1 def update(self, idx, error): p = self._get_priority(error) # change = p - self.tree[idx] self.tree[idx] = p self._propagate(idx) def _get_single(self, a, b, rand): #rand = random.uniform(a, b) idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here) data_idx = idx - self.capacity + 1 return idx, self.tree[idx], self.data[data_idx] def get_batch(self, n): batch_idx = [] batch = [] priorities = [] segment = self._total() / n for i in range(n): a = segment * i b = segment * (i + 1) rand = random.uniform(a, b) (idx, p, data) = self._get_single(a, b, rand) if data == 0: (idx, p, data) = self._get_single(a, b, rand) batch.append(data) batch_idx.append(idx) priorities.append(p) if batch[63] == 0: batch = batch return batch, batch_idx, priorities def get_len(self): return self.num class RankBased: def __init__(self, max_capacity): self.capacity = max_capacity self.data = [] self.priorities = None self.total = None self.cum_sum = None self.tiebreaker = count() def memory_full(self): return len(self.data) >= self.capacity def add(self, error, data): # check if there is space left in memory while self.memory_full(): oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0] del 
self.data[oldest_idx] # use tie breaker for transitions with equal error data = (error, next(self.tiebreaker), *data) heapq.heappush(self.data, data) def update(self, idx, error): self.data[idx] = (error, *self.data[idx][1:]) def get_batch(self, n): self._update_priorities() self.total = np.sum(self.priorities) self.cum_sum = np.cumsum(self.priorities) batch = [] priorities = [] # sampling the whole batch of indices at once is faster than sampling each one individually rands = np.random.rand(n) * self.total batch_idx = np.searchsorted(self.cum_sum, rands) # picking transitions one by one is faster than indexing with a list for idx in batch_idx:
return batch, batch_idx, priorities def get_len(self): return len(self.data) def _update_priorities(self): # order is inverse of actual position in heap order = np.array(range(self.get_len() + 1, 1, -1)) self.priorities = 1. / order class PrioritizedReplayMemory: # stored as ( s, a, r, s_ ) in SumTree # modified https://github.com/wotmd5731/dqn/blob/master/memory.py def __init__(self, max_capacity, method="prop"): if method == "prop": self.container = SumTree(max_capacity) elif method == "rank": self.container = RankBased(max_capacity) else: raise ValueError("Bad replay method") def memory_full(self): return self.container.memory_full() def push(self, error, sample): self.container.add(error, sample) def sample(self, n): return self.container.get_batch(n) def update(self, idx, error): self.container.update(idx, error) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.container.capacity = new_size def __len__(self): return self.container.get_len() # sanity check if __name__ == "__main__": capacity = 10 # CombinedReplayMemory(capacity)#NaiveReplayMemory(capacity) memory = PrioritizedReplayMemory(capacity) env, _ = get_env("Acrobot-v1") # Sample a transition s = env.reset() a = env.action_space.sample() s_next, r, done, _ = env.step(a) # Push a transition err = 0.5 memory.push(err, (s, a, r, s_next, done)) # Sample a batch size of 1 print(memory.sample(1))
batch.append(self.data[idx][2:]) priorities.append(self.priorities[idx])
conditional_block
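The `conditional_block` target above fills the loop body of `RankBased.get_batch`, which samples a whole batch with one cumulative-sum lookup per draw. A minimal sketch of that inverse-CDF trick; the `priorities` array below is made up for illustration:

```python
import numpy as np

# Hypothetical per-transition priorities (they do not need to sum to 1).
priorities = np.array([0.5, 0.25, 0.15, 0.10])
cum_sum = np.cumsum(priorities)
total = cum_sum[-1]

# One vectorized draw for the whole batch, then one lookup per sample.
rng = np.random.default_rng(1)
rands = rng.random(3) * total
batch_idx = np.searchsorted(cum_sum, rands)
print(batch_idx)  # each entry indexes a stored transition
```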
replay.py
import random from collections import deque from environment import get_env import numpy as np import heapq from itertools import count class BufferSizeManager: def __init__(self, initial_capacity, size_change=20): """Adaptive buffer size. If size_change > 1: Linear buffer update as in: https://arxiv.org/pdf/1710.06574.pdf If size_change in [0, 1]: Percentage update. If size_change < 0 then the algorithm works in the inverse manner as described in the paper. You should imagine the buffer manager as a mid-aged fat man that believes his role is key in the success of the company, even though many people think they could do without him.""" self.capacity = initial_capacity self.k = size_change self.td_error = 0 def update_td_error(self, new_td_error): self.td_error = abs(new_td_error) def update_memory_size(self, new_td_error): new_td_error = abs(new_td_error) # update = -1 if new_td_error < self.td_error, then the buffer must decrease; # update = 1 if new_td_error > self.td_error, than the buffer must increase; # update = 0 if new_td_error = self.td_error, buffer size remains constant. delta = new_td_error - self.td_error e = 1e-7 if abs(delta) < e: # for numeric stability return self.capacity update = delta / abs(delta) # allow for non-linear update (not covered in the method proposed by the paper) if abs(self.k) < 1: update *= int(self.capacity * self.k) else: update *= int(self.k) # Update the buffer size self.capacity = max(self.capacity + update, 1) # Update the stored td_error self.update_td_error(new_td_error) return self.capacity class NaiveReplayMemory: def __init__(self, capacity): self.capacity = capacity # List is necessary for dynamic buffer self.memory = [] # deque(maxlen=capacity) def pop(self, idx=0): # Pop is redefined as taking the oldest element (FIFO) for convinience. return self.memory.pop(idx) def memory_full(self): return len(self.memory) >= self.capacity def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) def sample(self, batch_size): return random.sample(self.memory, batch_size) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.capacity = new_size # self.push() takes care of decreasing the memory. # # Oldest experiences are discarded. For Ever. # # TODO: Check for a more efficient way of cleaning the memory. 
# while len(self.memory) > self.capacity: # _ = self.pop() def __len__(self): return len(self.memory) # Add different experience replay methods class CombinedReplayMemory(NaiveReplayMemory): def push(self, transition): while len(self.memory) >= self.capacity: _ = self.pop() self.memory.append(transition) self.last_transition = transition def sample(self, batch_size): samples = random.sample(self.memory, batch_size - 1) samples.append(self.last_transition) return samples class SumTree: # started from https://github.com/wotmd5731/dqn/blob/master/memory.py write = 0 def __init__(self, max_capacity): self.capacity = max_capacity self.tree = np.zeros(2 * max_capacity - 1) # [--------------Parent nodes-------------][-------leaves to recode priority-------] # size: capacity - 1 size: capacity self.data = np.zeros(max_capacity, dtype=object) # for all transitions # [--------------data frame-------------] # size: capacity self.num = 0 self.e = 0.01 # small amount to avoid zero priority self.a = 0.6 # [0~1] convert the importance of TD error to priority def _get_priority(self, error): return (error + self.e) ** self.a def _propagate_old(self, idx, change): parent = (idx - 1) // 2 self.tree[parent] += change if parent != 0: self._propagate(parent, change) def _propagate(self, idx): parent = (idx - 1) // 2 left = parent * 2 + 1 right = parent * 2 + 2 self.tree[parent] = self.tree[right] + self.tree[left] if parent != 0: self._propagate(parent) def _retrieve(self, idx, rand): """ Tree structure and array storage: Tree index: 0 -> storing priority sum / \ 1 2 / \ / \ 3 4 5 6 -> storing priority for transitions Array type for storing: [0,1,2,3,4,5,6] """ left = 2 * idx + 1 right = left + 1 if left >= len(self.tree): # end search when no more child return idx if rand <= self.tree[left]: # downward search, always search for a higher priority node return self._retrieve(left, rand) else: return self._retrieve(right, rand - self.tree[left]) def _total(self): return self.tree[0] # the root def add(self, error, data): idx = self.write + self.capacity - 1 self.data[self.write] = data # update data_frame self.update(idx, error) # update tree_frame self.write += 1 if self.write >= self.capacity: # replace when exceed the capacity self.write = 0 if self.num < self.capacity: self.num += 1 def update(self, idx, error): p = self._get_priority(error) # change = p - self.tree[idx] self.tree[idx] = p self._propagate(idx) def _get_single(self, a, b, rand): #rand = random.uniform(a, b) idx = self._retrieve(0, rand) # search the max leaf priority based on the lower_bound (rand here) data_idx = idx - self.capacity + 1 return idx, self.tree[idx], self.data[data_idx] def get_batch(self, n): batch_idx = [] batch = [] priorities = [] segment = self._total() / n for i in range(n): a = segment * i b = segment * (i + 1) rand = random.uniform(a, b) (idx, p, data) = self._get_single(a, b, rand) if data == 0: (idx, p, data) = self._get_single(a, b, rand) batch.append(data) batch_idx.append(idx) priorities.append(p) if batch[63] == 0: batch = batch return batch, batch_idx, priorities def get_len(self): return self.num class RankBased: def __init__(self, max_capacity): self.capacity = max_capacity self.data = [] self.priorities = None self.total = None self.cum_sum = None self.tiebreaker = count() def memory_full(self): return len(self.data) >= self.capacity def add(self, error, data): # check if there is space left in memory while self.memory_full(): oldest_idx = min(enumerate(self.data), key=lambda d: d[1][1])[0] del 
self.data[oldest_idx] # use tie breaker for transitions with equal error data = (error, next(self.tiebreaker), *data) heapq.heappush(self.data, data) def update(self, idx, error): self.data[idx] = (error, *self.data[idx][1:]) def get_batch(self, n): self._update_priorities() self.total = np.sum(self.priorities) self.cum_sum = np.cumsum(self.priorities) batch = [] priorities = [] # sampling the whole batch of indices at once is faster than sampling each one individually rands = np.random.rand(n) * self.total batch_idx = np.searchsorted(self.cum_sum, rands) # picking transitions one by one is faster than indexing with a list for idx in batch_idx: batch.append(self.data[idx][2:]) priorities.append(self.priorities[idx]) return batch, batch_idx, priorities def get_len(self): return len(self.data) def _update_priorities(self): # order is inverse of actual position in heap order = np.array(range(self.get_len() + 1, 1, -1)) self.priorities = 1. / order class PrioritizedReplayMemory: # stored as ( s, a, r, s_ ) in SumTree # modified https://github.com/wotmd5731/dqn/blob/master/memory.py
# sanity check if __name__ == "__main__": capacity = 10 # CombinedReplayMemory(capacity)#NaiveReplayMemory(capacity) memory = PrioritizedReplayMemory(capacity) env, _ = get_env("Acrobot-v1") # Sample a transition s = env.reset() a = env.action_space.sample() s_next, r, done, _ = env.step(a) # Push a transition err = 0.5 memory.push(err, (s, a, r, s_next, done)) # Sample a batch size of 1 print(memory.sample(1))
def __init__(self, max_capacity, method="prop"): if method == "prop": self.container = SumTree(max_capacity) elif method == "rank": self.container = RankBased(max_capacity) else: raise ValueError("Bad replay method") def memory_full(self): return self.container.memory_full() def push(self, error, sample): self.container.add(error, sample) def sample(self, n): return self.container.get_batch(n) def update(self, idx, error): self.container.update(idx, error) def resize_memory(self, new_size=None): """Redefines the size of the buffer. Inputs: new_size (type: int), capacity = new_size.""" self.container.capacity = new_size def __len__(self): return self.container.get_len()
identifier_body
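The `identifier_body` above completes `PrioritizedReplayMemory.__init__`, which picks the proportional (SumTree) or rank-based container. A related piece of `replay.py` worth a worked example is the adaptive capacity rule in `BufferSizeManager.update_memory_size`: grow the buffer when the new |TD error| exceeds the stored one, shrink it otherwise, by k transitions (or by a fraction k of the current capacity when |k| < 1), never below 1. The sketch below restates that rule as a pure function; the function name and the numbers in the usage lines are illustrative:

```python
def next_capacity(capacity, k, old_td, new_td, eps=1e-7):
    """Illustrative restatement of BufferSizeManager.update_memory_size."""
    delta = abs(new_td) - abs(old_td)
    if abs(delta) < eps:                      # (numerically) equal errors: no change
        return capacity
    step = int(capacity * k) if abs(k) < 1 else int(k)
    direction = 1 if delta > 0 else -1        # grow on larger error, shrink on smaller
    return max(capacity + direction * step, 1)

print(next_capacity(1000, 20, old_td=0.2, new_td=0.5))    # linear step: 1000 -> 1020
print(next_capacity(1000, 0.1, old_td=0.5, new_td=0.2))   # percentage step: 1000 -> 900
```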
jupyterExporter.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 'use strict'; import type { nbformat } from '@jupyterlab/coreutils'; import { inject, injectable } from 'inversify'; import * as os from 'os'; import * as path from 'path'; import * as uuid from 'uuid/v4'; import { Uri } from 'vscode'; import { concatMultilineStringInput } from '../../../datascience-ui/common'; import { createCodeCell } from '../../../datascience-ui/common/cellFactory'; import { IApplicationShell, IWorkspaceService } from '../../common/application/types'; import { traceError } from '../../common/logger'; import { IFileSystem, IPlatformService } from '../../common/platform/types'; import { IConfigurationService } from '../../common/types'; import * as localize from '../../common/utils/localize'; import { noop } from '../../common/utils/misc'; import { CellMatcher } from '../cellMatcher'; import { CodeSnippits, Identifiers } from '../constants'; import { CellState, ICell, IDataScienceErrorHandler, IJupyterExecution, INotebookEditorProvider, INotebookExporter } from '../types'; @injectable() export class JupyterExporter implements INotebookExporter {
( @inject(IJupyterExecution) private jupyterExecution: IJupyterExecution, @inject(IWorkspaceService) private workspaceService: IWorkspaceService, @inject(IConfigurationService) private configService: IConfigurationService, @inject(IFileSystem) private fileSystem: IFileSystem, @inject(IPlatformService) private readonly platform: IPlatformService, @inject(IApplicationShell) private readonly applicationShell: IApplicationShell, @inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider, @inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler ) {} public dispose() { noop(); } public async exportToFile(cells: ICell[], file: string): Promise<void> { let directoryChange; const settings = this.configService.getSettings(); if (settings.datascience.changeDirOnImportExport) { directoryChange = file; } const notebook = await this.translateToNotebook(cells, directoryChange); try { // tslint:disable-next-line: no-any const contents = JSON.stringify(notebook); await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' }); const openQuestion1 = localize.DataScience.exportOpenQuestion1(); const openQuestion2 = (await this.jupyterExecution.isSpawnSupported()) ? localize.DataScience.exportOpenQuestion() : undefined; this.showInformationMessage( localize.DataScience.exportDialogComplete().format(file), openQuestion1, openQuestion2 ).then(async (str: string | undefined) => { try { if (str === openQuestion2 && openQuestion2) { // If the user wants to, open the notebook they just generated. await this.jupyterExecution.spawnNotebook(file); } else if (str === openQuestion1) { await this.ipynbProvider.open(Uri.file(file)); } } catch (e) { await this.errorHandler.handleError(e); } }); } catch (exc) { traceError('Error in exporting notebook file'); this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc)); } } public async translateToNotebook( cells: ICell[], changeDirectory?: string ): Promise<nbformat.INotebookContent | undefined> { // If requested, add in a change directory cell to fix relative paths if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) { cells = await this.addDirectoryChangeCell(cells, changeDirectory); } const pythonNumber = await this.extractPythonMainVersion(); // Use this to build our metadata object const metadata: nbformat.INotebookMetadata = { language_info: { codemirror_mode: { name: 'ipython', version: pythonNumber }, file_extension: '.py', mimetype: 'text/x-python', name: 'python', nbconvert_exporter: 'python', pygments_lexer: `ipython${pythonNumber}`, version: pythonNumber }, orig_nbformat: 2 }; // Create an object for matching cell definitions const matcher = new CellMatcher(this.configService.getSettings().datascience); // Combine this into a JSON object return { cells: this.pruneCells(cells, matcher), nbformat: 4, nbformat_minor: 2, metadata: metadata }; } private showInformationMessage( message: string, question1: string, question2?: string ): Thenable<string | undefined> { if (question2) { return this.applicationShell.showInformationMessage(message, question1, question2); } else { return this.applicationShell.showInformationMessage(message, question1); } } // For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => { const changeDirectory = await 
this.calculateDirectoryChange(file, cells); if (changeDirectory) { const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format( localize.DataScience.exportChangeDirectoryComment(), CodeSnippits.ChangeDirectoryCommentIdentifier, changeDirectory ); const cell: ICell = { data: createCodeCell(exportChangeDirectory), id: uuid(), file: Identifiers.EmptyFileName, line: 0, state: CellState.finished }; return [cell, ...cells]; } else { return cells; } }; // When we export we want to our change directory back to the first real file that we saw run from any workspace folder private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => { for (const cell of cells) { const filename = cell.file; // First check that this is an absolute file that exists (we add in temp files to run system cell) if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) { // We've already check that workspace folders above for (const folder of this.workspaceService.workspaceFolders!) { if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) { return folder.uri.fsPath; } } } } return undefined; }; private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => { // Make sure we don't already have a cell with a ChangeDirectory comment in it. let directoryChange: string | undefined; const haveChangeAlready = cells.find((c) => concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier) ); if (!haveChangeAlready) { const notebookFilePath = path.dirname(notebookFile); // First see if we have a workspace open, this only works if we have a workspace root to be relative to if (this.workspaceService.hasWorkspaceFolders) { const workspacePath = await this.firstWorkspaceFolder(cells); // Make sure that we have everything that we need here if ( workspacePath && path.isAbsolute(workspacePath) && notebookFilePath && path.isAbsolute(notebookFilePath) ) { directoryChange = path.relative(notebookFilePath, workspacePath); } } } // If path.relative can't calculate a relative path, then it just returns the full second path // so check here, we only want this if we were able to calculate a relative path, no network shares or drives if (directoryChange && !path.isAbsolute(directoryChange)) { // Escape windows path chars so they end up in the source escaped if (this.platform.isWindows) { directoryChange = directoryChange.replace('\\', '\\\\'); } return directoryChange; } else { return undefined; } }; private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => { // First filter out sys info cells. Jupyter doesn't understand these const filtered = cells.filter((c) => c.data.cell_type !== 'messages'); // Then prune each cell down to just the cell data. return filtered.map((c) => this.pruneCell(c, cellMatcher)); }; private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => { // Remove the #%% of the top of the source if there is any. We don't need // this to end up in the exported ipynb file. const copy = { ...cell.data }; copy.source = this.pruneSource(cell.data.source, cellMatcher); return copy; }; private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => { // Remove the comments on the top if there. 
if (Array.isArray(source) && source.length > 0) { if (cellMatcher.isCell(source[0])) { return source.slice(1); } } else { const array = source .toString() .split('\n') .map((s) => `${s}\n`); if (array.length > 0 && cellMatcher.isCell(array[0])) { return array.slice(1); } } return source; }; private extractPythonMainVersion = async (): Promise<number> => { // Use the active interpreter const usableInterpreter = await this.jupyterExecution.getUsableJupyterPython(); return usableInterpreter && usableInterpreter.version ? usableInterpreter.version.major : 3; }; }
constructor
identifier_name
jupyterExporter.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 'use strict'; import type { nbformat } from '@jupyterlab/coreutils'; import { inject, injectable } from 'inversify'; import * as os from 'os'; import * as path from 'path'; import * as uuid from 'uuid/v4'; import { Uri } from 'vscode'; import { concatMultilineStringInput } from '../../../datascience-ui/common'; import { createCodeCell } from '../../../datascience-ui/common/cellFactory'; import { IApplicationShell, IWorkspaceService } from '../../common/application/types'; import { traceError } from '../../common/logger'; import { IFileSystem, IPlatformService } from '../../common/platform/types'; import { IConfigurationService } from '../../common/types'; import * as localize from '../../common/utils/localize'; import { noop } from '../../common/utils/misc'; import { CellMatcher } from '../cellMatcher'; import { CodeSnippits, Identifiers } from '../constants'; import { CellState, ICell, IDataScienceErrorHandler, IJupyterExecution, INotebookEditorProvider, INotebookExporter } from '../types'; @injectable() export class JupyterExporter implements INotebookExporter { constructor( @inject(IJupyterExecution) private jupyterExecution: IJupyterExecution, @inject(IWorkspaceService) private workspaceService: IWorkspaceService, @inject(IConfigurationService) private configService: IConfigurationService, @inject(IFileSystem) private fileSystem: IFileSystem, @inject(IPlatformService) private readonly platform: IPlatformService, @inject(IApplicationShell) private readonly applicationShell: IApplicationShell, @inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider, @inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler ) {} public dispose() { noop(); } public async exportToFile(cells: ICell[], file: string): Promise<void> { let directoryChange; const settings = this.configService.getSettings(); if (settings.datascience.changeDirOnImportExport) { directoryChange = file; } const notebook = await this.translateToNotebook(cells, directoryChange); try { // tslint:disable-next-line: no-any const contents = JSON.stringify(notebook); await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' }); const openQuestion1 = localize.DataScience.exportOpenQuestion1(); const openQuestion2 = (await this.jupyterExecution.isSpawnSupported()) ? localize.DataScience.exportOpenQuestion() : undefined; this.showInformationMessage( localize.DataScience.exportDialogComplete().format(file), openQuestion1, openQuestion2 ).then(async (str: string | undefined) => { try { if (str === openQuestion2 && openQuestion2) { // If the user wants to, open the notebook they just generated. 
await this.jupyterExecution.spawnNotebook(file); } else if (str === openQuestion1) { await this.ipynbProvider.open(Uri.file(file)); } } catch (e) { await this.errorHandler.handleError(e); } }); } catch (exc) { traceError('Error in exporting notebook file'); this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc)); } } public async translateToNotebook( cells: ICell[], changeDirectory?: string ): Promise<nbformat.INotebookContent | undefined> { // If requested, add in a change directory cell to fix relative paths if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) { cells = await this.addDirectoryChangeCell(cells, changeDirectory); } const pythonNumber = await this.extractPythonMainVersion(); // Use this to build our metadata object const metadata: nbformat.INotebookMetadata = { language_info: { codemirror_mode: { name: 'ipython', version: pythonNumber }, file_extension: '.py', mimetype: 'text/x-python', name: 'python', nbconvert_exporter: 'python', pygments_lexer: `ipython${pythonNumber}`, version: pythonNumber }, orig_nbformat: 2 }; // Create an object for matching cell definitions const matcher = new CellMatcher(this.configService.getSettings().datascience); // Combine this into a JSON object return { cells: this.pruneCells(cells, matcher), nbformat: 4, nbformat_minor: 2, metadata: metadata }; } private showInformationMessage( message: string, question1: string, question2?: string ): Thenable<string | undefined> { if (question2) { return this.applicationShell.showInformationMessage(message, question1, question2); } else { return this.applicationShell.showInformationMessage(message, question1); } } // For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => { const changeDirectory = await this.calculateDirectoryChange(file, cells); if (changeDirectory) { const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format( localize.DataScience.exportChangeDirectoryComment(), CodeSnippits.ChangeDirectoryCommentIdentifier, changeDirectory ); const cell: ICell = { data: createCodeCell(exportChangeDirectory), id: uuid(), file: Identifiers.EmptyFileName, line: 0, state: CellState.finished }; return [cell, ...cells]; } else { return cells; } }; // When we export we want to our change directory back to the first real file that we saw run from any workspace folder private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => { for (const cell of cells) { const filename = cell.file; // First check that this is an absolute file that exists (we add in temp files to run system cell) if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) { // We've already check that workspace folders above for (const folder of this.workspaceService.workspaceFolders!) { if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) { return folder.uri.fsPath; } } } } return undefined; }; private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => { // Make sure we don't already have a cell with a ChangeDirectory comment in it. 
let directoryChange: string | undefined; const haveChangeAlready = cells.find((c) => concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier) ); if (!haveChangeAlready) { const notebookFilePath = path.dirname(notebookFile); // First see if we have a workspace open, this only works if we have a workspace root to be relative to if (this.workspaceService.hasWorkspaceFolders) { const workspacePath = await this.firstWorkspaceFolder(cells); // Make sure that we have everything that we need here if ( workspacePath && path.isAbsolute(workspacePath) && notebookFilePath && path.isAbsolute(notebookFilePath) ) { directoryChange = path.relative(notebookFilePath, workspacePath); } } } // If path.relative can't calculate a relative path, then it just returns the full second path // so check here, we only want this if we were able to calculate a relative path, no network shares or drives if (directoryChange && !path.isAbsolute(directoryChange)) { // Escape windows path chars so they end up in the source escaped if (this.platform.isWindows) { directoryChange = directoryChange.replace('\\', '\\\\'); } return directoryChange; } else { return undefined; } }; private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => { // First filter out sys info cells. Jupyter doesn't understand these const filtered = cells.filter((c) => c.data.cell_type !== 'messages'); // Then prune each cell down to just the cell data. return filtered.map((c) => this.pruneCell(c, cellMatcher)); }; private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => { // Remove the #%% of the top of the source if there is any. We don't need // this to end up in the exported ipynb file. const copy = { ...cell.data }; copy.source = this.pruneSource(cell.data.source, cellMatcher); return copy; }; private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => { // Remove the comments on the top if there. if (Array.isArray(source) && source.length > 0)
else { const array = source .toString() .split('\n') .map((s) => `${s}\n`); if (array.length > 0 && cellMatcher.isCell(array[0])) { return array.slice(1); } } return source; }; private extractPythonMainVersion = async (): Promise<number> => { // Use the active interpreter const usableInterpreter = await this.jupyterExecution.getUsableJupyterPython(); return usableInterpreter && usableInterpreter.version ? usableInterpreter.version.major : 3; }; }
{ if (cellMatcher.isCell(source[0])) { return source.slice(1); } }
conditional_block
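The `conditional_block` above belongs to `pruneSource` in `jupyterExporter.ts`, which drops a leading cell marker before a cell is written into the exported notebook. A rough Python analogue, under the assumption that cells are delimited by `#%%`-style markers; `is_cell_marker` is a stand-in for `CellMatcher.isCell` and is not part of the original code:

```python
def is_cell_marker(line: str) -> bool:
    # Stand-in for CellMatcher.isCell; assumes "#%%"-style cell delimiters.
    stripped = line.lstrip()
    return stripped.startswith("#%%") or stripped.startswith("# %%")

def prune_source(source):
    # Accepts either a list of lines or a single multi-line string, mirroring
    # how the TypeScript version handles nbformat.MultilineString.
    lines = source if isinstance(source, list) else [s + "\n" for s in str(source).split("\n")]
    if lines and is_cell_marker(lines[0]):
        return lines[1:]
    return lines

print(prune_source("#%%\nprint('hi')"))  # -> ["print('hi')\n"]
```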
jupyterExporter.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 'use strict'; import type { nbformat } from '@jupyterlab/coreutils'; import { inject, injectable } from 'inversify'; import * as os from 'os'; import * as path from 'path'; import * as uuid from 'uuid/v4'; import { Uri } from 'vscode'; import { concatMultilineStringInput } from '../../../datascience-ui/common'; import { createCodeCell } from '../../../datascience-ui/common/cellFactory'; import { IApplicationShell, IWorkspaceService } from '../../common/application/types'; import { traceError } from '../../common/logger'; import { IFileSystem, IPlatformService } from '../../common/platform/types'; import { IConfigurationService } from '../../common/types'; import * as localize from '../../common/utils/localize'; import { noop } from '../../common/utils/misc'; import { CellMatcher } from '../cellMatcher'; import { CodeSnippits, Identifiers } from '../constants'; import { CellState, ICell, IDataScienceErrorHandler, IJupyterExecution, INotebookEditorProvider, INotebookExporter } from '../types'; @injectable() export class JupyterExporter implements INotebookExporter { constructor( @inject(IJupyterExecution) private jupyterExecution: IJupyterExecution, @inject(IWorkspaceService) private workspaceService: IWorkspaceService, @inject(IConfigurationService) private configService: IConfigurationService, @inject(IFileSystem) private fileSystem: IFileSystem, @inject(IPlatformService) private readonly platform: IPlatformService, @inject(IApplicationShell) private readonly applicationShell: IApplicationShell, @inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider, @inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler ) {} public dispose() { noop(); } public async exportToFile(cells: ICell[], file: string): Promise<void> { let directoryChange; const settings = this.configService.getSettings(); if (settings.datascience.changeDirOnImportExport) { directoryChange = file; } const notebook = await this.translateToNotebook(cells, directoryChange); try { // tslint:disable-next-line: no-any const contents = JSON.stringify(notebook); await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' }); const openQuestion1 = localize.DataScience.exportOpenQuestion1(); const openQuestion2 = (await this.jupyterExecution.isSpawnSupported()) ? localize.DataScience.exportOpenQuestion() : undefined; this.showInformationMessage( localize.DataScience.exportDialogComplete().format(file), openQuestion1, openQuestion2 ).then(async (str: string | undefined) => { try { if (str === openQuestion2 && openQuestion2) { // If the user wants to, open the notebook they just generated. 
await this.jupyterExecution.spawnNotebook(file); } else if (str === openQuestion1) { await this.ipynbProvider.open(Uri.file(file)); } } catch (e) { await this.errorHandler.handleError(e); } }); } catch (exc) { traceError('Error in exporting notebook file'); this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc)); } } public async translateToNotebook( cells: ICell[], changeDirectory?: string ): Promise<nbformat.INotebookContent | undefined> { // If requested, add in a change directory cell to fix relative paths if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) { cells = await this.addDirectoryChangeCell(cells, changeDirectory); } const pythonNumber = await this.extractPythonMainVersion(); // Use this to build our metadata object const metadata: nbformat.INotebookMetadata = { language_info: { codemirror_mode: { name: 'ipython', version: pythonNumber }, file_extension: '.py', mimetype: 'text/x-python', name: 'python', nbconvert_exporter: 'python', pygments_lexer: `ipython${pythonNumber}`, version: pythonNumber }, orig_nbformat: 2 }; // Create an object for matching cell definitions const matcher = new CellMatcher(this.configService.getSettings().datascience); // Combine this into a JSON object return { cells: this.pruneCells(cells, matcher), nbformat: 4, nbformat_minor: 2, metadata: metadata }; } private showInformationMessage( message: string, question1: string, question2?: string ): Thenable<string | undefined> { if (question2) { return this.applicationShell.showInformationMessage(message, question1, question2); } else { return this.applicationShell.showInformationMessage(message, question1); } } // For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => { const changeDirectory = await this.calculateDirectoryChange(file, cells); if (changeDirectory) { const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format( localize.DataScience.exportChangeDirectoryComment(), CodeSnippits.ChangeDirectoryCommentIdentifier, changeDirectory ); const cell: ICell = { data: createCodeCell(exportChangeDirectory), id: uuid(), file: Identifiers.EmptyFileName, line: 0, state: CellState.finished }; return [cell, ...cells]; } else { return cells; } }; // When we export we want to our change directory back to the first real file that we saw run from any workspace folder private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => { for (const cell of cells) { const filename = cell.file; // First check that this is an absolute file that exists (we add in temp files to run system cell) if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) { // We've already check that workspace folders above for (const folder of this.workspaceService.workspaceFolders!) { if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) { return folder.uri.fsPath; } } } } return undefined; }; private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => { // Make sure we don't already have a cell with a ChangeDirectory comment in it. let directoryChange: string | undefined; const haveChangeAlready = cells.find((c) => concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier) ); if (!haveChangeAlready) {
if (this.workspaceService.hasWorkspaceFolders) { const workspacePath = await this.firstWorkspaceFolder(cells); // Make sure that we have everything that we need here if ( workspacePath && path.isAbsolute(workspacePath) && notebookFilePath && path.isAbsolute(notebookFilePath) ) { directoryChange = path.relative(notebookFilePath, workspacePath); } } } // If path.relative can't calculate a relative path, then it just returns the full second path // so check here, we only want this if we were able to calculate a relative path, no network shares or drives if (directoryChange && !path.isAbsolute(directoryChange)) { // Escape windows path chars so they end up in the source escaped if (this.platform.isWindows) { directoryChange = directoryChange.replace('\\', '\\\\'); } return directoryChange; } else { return undefined; } }; private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => { // First filter out sys info cells. Jupyter doesn't understand these const filtered = cells.filter((c) => c.data.cell_type !== 'messages'); // Then prune each cell down to just the cell data. return filtered.map((c) => this.pruneCell(c, cellMatcher)); }; private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => { // Remove the #%% of the top of the source if there is any. We don't need // this to end up in the exported ipynb file. const copy = { ...cell.data }; copy.source = this.pruneSource(cell.data.source, cellMatcher); return copy; }; private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => { // Remove the comments on the top if there. if (Array.isArray(source) && source.length > 0) { if (cellMatcher.isCell(source[0])) { return source.slice(1); } } else { const array = source .toString() .split('\n') .map((s) => `${s}\n`); if (array.length > 0 && cellMatcher.isCell(array[0])) { return array.slice(1); } } return source; }; private extractPythonMainVersion = async (): Promise<number> => { // Use the active interpreter const usableInterpreter = await this.jupyterExecution.getUsableJupyterPython(); return usableInterpreter && usableInterpreter.version ? usableInterpreter.version.major : 3; }; }
const notebookFilePath = path.dirname(notebookFile); // First see if we have a workspace open, this only works if we have a workspace root to be relative to
random_line_split
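The `random_line_split` target above sits inside `calculateDirectoryChange`, which only emits a directory-change cell when a genuinely relative path from the notebook's folder back to the workspace root can be computed (no other drive or network share). A hedged Python sketch of that check; the example paths are hypothetical:

```python
import os

def directory_change(notebook_file: str, workspace_path: str):
    try:
        rel = os.path.relpath(workspace_path, start=os.path.dirname(notebook_file))
    except ValueError:          # e.g. different drives on Windows: no relative path exists
        return None
    if os.path.isabs(rel):
        return None             # mirror the "no network shares or drives" guard
    if os.name == "nt":
        rel = rel.replace("\\", "\\\\")  # escape backslashes for the generated source
    return rel

print(directory_change("/repo/notebooks/out.ipynb", "/repo"))  # -> '..'
```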
jupyterExporter.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 'use strict'; import type { nbformat } from '@jupyterlab/coreutils'; import { inject, injectable } from 'inversify'; import * as os from 'os'; import * as path from 'path'; import * as uuid from 'uuid/v4'; import { Uri } from 'vscode'; import { concatMultilineStringInput } from '../../../datascience-ui/common'; import { createCodeCell } from '../../../datascience-ui/common/cellFactory'; import { IApplicationShell, IWorkspaceService } from '../../common/application/types'; import { traceError } from '../../common/logger'; import { IFileSystem, IPlatformService } from '../../common/platform/types'; import { IConfigurationService } from '../../common/types'; import * as localize from '../../common/utils/localize'; import { noop } from '../../common/utils/misc'; import { CellMatcher } from '../cellMatcher'; import { CodeSnippits, Identifiers } from '../constants'; import { CellState, ICell, IDataScienceErrorHandler, IJupyterExecution, INotebookEditorProvider, INotebookExporter } from '../types'; @injectable() export class JupyterExporter implements INotebookExporter { constructor( @inject(IJupyterExecution) private jupyterExecution: IJupyterExecution, @inject(IWorkspaceService) private workspaceService: IWorkspaceService, @inject(IConfigurationService) private configService: IConfigurationService, @inject(IFileSystem) private fileSystem: IFileSystem, @inject(IPlatformService) private readonly platform: IPlatformService, @inject(IApplicationShell) private readonly applicationShell: IApplicationShell, @inject(INotebookEditorProvider) protected ipynbProvider: INotebookEditorProvider, @inject(IDataScienceErrorHandler) protected errorHandler: IDataScienceErrorHandler ) {} public dispose() { noop(); } public async exportToFile(cells: ICell[], file: string): Promise<void> { let directoryChange; const settings = this.configService.getSettings(); if (settings.datascience.changeDirOnImportExport) { directoryChange = file; } const notebook = await this.translateToNotebook(cells, directoryChange); try { // tslint:disable-next-line: no-any const contents = JSON.stringify(notebook); await this.fileSystem.writeFile(file, contents, { encoding: 'utf8', flag: 'w' }); const openQuestion1 = localize.DataScience.exportOpenQuestion1(); const openQuestion2 = (await this.jupyterExecution.isSpawnSupported()) ? localize.DataScience.exportOpenQuestion() : undefined; this.showInformationMessage( localize.DataScience.exportDialogComplete().format(file), openQuestion1, openQuestion2 ).then(async (str: string | undefined) => { try { if (str === openQuestion2 && openQuestion2) { // If the user wants to, open the notebook they just generated. await this.jupyterExecution.spawnNotebook(file); } else if (str === openQuestion1) { await this.ipynbProvider.open(Uri.file(file)); } } catch (e) { await this.errorHandler.handleError(e); } }); } catch (exc) { traceError('Error in exporting notebook file'); this.applicationShell.showInformationMessage(localize.DataScience.exportDialogFailed().format(exc)); } } public async translateToNotebook( cells: ICell[], changeDirectory?: string ): Promise<nbformat.INotebookContent | undefined>
private showInformationMessage( message: string, question1: string, question2?: string ): Thenable<string | undefined> { if (question2) { return this.applicationShell.showInformationMessage(message, question1, question2); } else { return this.applicationShell.showInformationMessage(message, question1); } } // For exporting, put in a cell that will change the working directory back to the workspace directory so relative data paths will load correctly private addDirectoryChangeCell = async (cells: ICell[], file: string): Promise<ICell[]> => { const changeDirectory = await this.calculateDirectoryChange(file, cells); if (changeDirectory) { const exportChangeDirectory = CodeSnippits.ChangeDirectory.join(os.EOL).format( localize.DataScience.exportChangeDirectoryComment(), CodeSnippits.ChangeDirectoryCommentIdentifier, changeDirectory ); const cell: ICell = { data: createCodeCell(exportChangeDirectory), id: uuid(), file: Identifiers.EmptyFileName, line: 0, state: CellState.finished }; return [cell, ...cells]; } else { return cells; } }; // When we export we want to our change directory back to the first real file that we saw run from any workspace folder private firstWorkspaceFolder = async (cells: ICell[]): Promise<string | undefined> => { for (const cell of cells) { const filename = cell.file; // First check that this is an absolute file that exists (we add in temp files to run system cell) if (path.isAbsolute(filename) && (await this.fileSystem.fileExists(filename))) { // We've already check that workspace folders above for (const folder of this.workspaceService.workspaceFolders!) { if (filename.toLowerCase().startsWith(folder.uri.fsPath.toLowerCase())) { return folder.uri.fsPath; } } } } return undefined; }; private calculateDirectoryChange = async (notebookFile: string, cells: ICell[]): Promise<string | undefined> => { // Make sure we don't already have a cell with a ChangeDirectory comment in it. let directoryChange: string | undefined; const haveChangeAlready = cells.find((c) => concatMultilineStringInput(c.data.source).includes(CodeSnippits.ChangeDirectoryCommentIdentifier) ); if (!haveChangeAlready) { const notebookFilePath = path.dirname(notebookFile); // First see if we have a workspace open, this only works if we have a workspace root to be relative to if (this.workspaceService.hasWorkspaceFolders) { const workspacePath = await this.firstWorkspaceFolder(cells); // Make sure that we have everything that we need here if ( workspacePath && path.isAbsolute(workspacePath) && notebookFilePath && path.isAbsolute(notebookFilePath) ) { directoryChange = path.relative(notebookFilePath, workspacePath); } } } // If path.relative can't calculate a relative path, then it just returns the full second path // so check here, we only want this if we were able to calculate a relative path, no network shares or drives if (directoryChange && !path.isAbsolute(directoryChange)) { // Escape windows path chars so they end up in the source escaped if (this.platform.isWindows) { directoryChange = directoryChange.replace('\\', '\\\\'); } return directoryChange; } else { return undefined; } }; private pruneCells = (cells: ICell[], cellMatcher: CellMatcher): nbformat.IBaseCell[] => { // First filter out sys info cells. Jupyter doesn't understand these const filtered = cells.filter((c) => c.data.cell_type !== 'messages'); // Then prune each cell down to just the cell data. 
return filtered.map((c) => this.pruneCell(c, cellMatcher)); }; private pruneCell = (cell: ICell, cellMatcher: CellMatcher): nbformat.IBaseCell => { // Remove the #%% of the top of the source if there is any. We don't need // this to end up in the exported ipynb file. const copy = { ...cell.data }; copy.source = this.pruneSource(cell.data.source, cellMatcher); return copy; }; private pruneSource = (source: nbformat.MultilineString, cellMatcher: CellMatcher): nbformat.MultilineString => { // Remove the comments on the top if there. if (Array.isArray(source) && source.length > 0) { if (cellMatcher.isCell(source[0])) { return source.slice(1); } } else { const array = source .toString() .split('\n') .map((s) => `${s}\n`); if (array.length > 0 && cellMatcher.isCell(array[0])) { return array.slice(1); } } return source; }; private extractPythonMainVersion = async (): Promise<number> => { // Use the active interpreter const usableInterpreter = await this.jupyterExecution.getUsableJupyterPython(); return usableInterpreter && usableInterpreter.version ? usableInterpreter.version.major : 3; }; }
{ // If requested, add in a change directory cell to fix relative paths if (changeDirectory && this.configService.getSettings().datascience.changeDirOnImportExport) { cells = await this.addDirectoryChangeCell(cells, changeDirectory); } const pythonNumber = await this.extractPythonMainVersion(); // Use this to build our metadata object const metadata: nbformat.INotebookMetadata = { language_info: { codemirror_mode: { name: 'ipython', version: pythonNumber }, file_extension: '.py', mimetype: 'text/x-python', name: 'python', nbconvert_exporter: 'python', pygments_lexer: `ipython${pythonNumber}`, version: pythonNumber }, orig_nbformat: 2 }; // Create an object for matching cell definitions const matcher = new CellMatcher(this.configService.getSettings().datascience); // Combine this into a JSON object return { cells: this.pruneCells(cells, matcher), nbformat: 4, nbformat_minor: 2, metadata: metadata }; }
identifier_body
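The `identifier_body` above is the body of `translateToNotebook`. For orientation, this is roughly the notebook JSON it assembles, sketched in Python with an empty `cells` list; the metadata values are copied from the TypeScript above, and `python_major` is a placeholder for the active interpreter's version:

```python
import json

python_major = 3  # in the extension this comes from the active interpreter

notebook = {
    "cells": [],  # pruned cells would go here
    "nbformat": 4,
    "nbformat_minor": 2,
    "metadata": {
        "language_info": {
            "codemirror_mode": {"name": "ipython", "version": python_major},
            "file_extension": ".py",
            "mimetype": "text/x-python",
            "name": "python",
            "nbconvert_exporter": "python",
            "pygments_lexer": f"ipython{python_major}",
            "version": python_major,
        },
        "orig_nbformat": 2,
    },
}

print(json.dumps(notebook, indent=2)[:120])
```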
routex.go
package routex import ( "broker" "encoding/json" "fmt" "github.com/googollee/go-pubsub" "github.com/googollee/go-rest" "logger" "math/rand" "model" "net/http" "net/url" "notifier" "os" "routex/model" "sync" "time" ) type RouteMap struct { rest.Service `prefix:"/v3/routex" mime:"application/json"` updateIdentity rest.SimpleNode `route:"/_inner/update_identity" method:"POST"` updateExfee rest.SimpleNode `route:"/_inner/update_exfee" method:"POST"` searchRoutex rest.SimpleNode `route:"/_inner/search/crosses" method:"POST"` getRoutex rest.SimpleNode `route:"/_inner/users/:user_id/crosses/:cross_id" method:"GET"` setUser rest.SimpleNode `route:"/users/crosses/:cross_id" method:"POST"` updateBreadcrums rest.SimpleNode `route:"/breadcrumbs" method:"POST"` updateBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"POST"` getBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id" method:"GET"` getUserBreadcrums rest.SimpleNode `route:"/breadcrumbs/crosses/:cross_id/users/:user_id" method:"GET"` getUserBreadcrumsInner rest.SimpleNode `route:"/_inner/breadcrumbs/users/:user_id" method:"GET"` searchGeomarks rest.SimpleNode `route:"/_inner/geomarks/crosses/:cross_id" method:"GET"` getGeomarks rest.SimpleNode `route:"/geomarks/crosses/:cross_id" method:"GET"` setGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"PUT"` deleteGeomark rest.SimpleNode `route:"/geomarks/crosses/:cross_id/:mark_type/:kind.:mark_id" method:"DELETE"` stream rest.Streaming `route:"/crosses/:cross_id" method:"WATCH"` options rest.SimpleNode `route:"/crosses/:cross_id" method:"OPTIONS"` sendNotification rest.SimpleNode `route:"/notification/crosses/:cross_id" method:"POST"` rand *rand.Rand routexRepo rmodel.RoutexRepo breadcrumbCache rmodel.BreadcrumbCache breadcrumbsRepo rmodel.BreadcrumbsRepo geomarksRepo rmodel.GeomarksRepo conversion rmodel.GeoConversionRepo platform *broker.Platform config *model.Config tutorialDatas map[int64][]rmodel.TutorialData pubsub *pubsub.Pubsub castLocker sync.RWMutex quit chan int } func New(routexRepo rmodel.RoutexRepo, breadcrumbCache rmodel.BreadcrumbCache, breadcrumbsRepo rmodel.BreadcrumbsRepo, geomarksRepo rmodel.GeomarksRepo, conversion rmodel.GeoConversionRepo, platform *broker.Platform, config *model.Config) (*RouteMap, error) { tutorialDatas := make(map[int64][]rmodel.TutorialData) for _, userId := range config.TutorialBotUserIds { file := config.Routex.TutorialDataFile[fmt.Sprintf("%d", userId)] f, err := os.Open(file) if err != nil { return nil, fmt.Errorf("can't find tutorial file %s for tutorial bot %d", file, userId) } var datas []rmodel.TutorialData decoder := json.NewDecoder(f) err = decoder.Decode(&datas) if err != nil { return nil, fmt.Errorf("invalid tutorial data %s for tutorial bot %d: %s", file, userId, err) } tutorialDatas[userId] = datas } ret := &RouteMap{ rand: rand.New(rand.NewSource(time.Now().Unix())), routexRepo: routexRepo, breadcrumbCache: breadcrumbCache, breadcrumbsRepo: breadcrumbsRepo, geomarksRepo: geomarksRepo, conversion: conversion, platform: platform, tutorialDatas: tutorialDatas, config: config, pubsub: pubsub.New(20), quit: make(chan int), } go ret.tutorialGenerator() return ret, nil } func (m RouteMap) UpdateIdentity(ctx rest.Context, identity model.Identity) { id := rmodel.Identity{ Identity: identity, Type: "identity", Action: "update", } m.pubsub.Publish(m.identityName(identity), id) } func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) { var 
crossId int64 var action string ctx.Bind("cross_id", &crossId) ctx.Bind("action", &action) if err := ctx.BindError(); err != nil { ctx.Return(http.StatusBadRequest, err) return } if action != "join" && action != "remove" { ctx.Return(http.StatusBadRequest, "invalid action: %s", action) return } id := rmodel.Invitation{ Identity: invitations.Identity, Notifications: invitations.Notifications, Type: "invitation", Action: action, } m.pubsub.Publish(m.publicName(crossId), id) } type UserCrossSetup struct { SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"` AfterInSeconds int `json:"after_in_seconds,omitempty"` } func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) { token, ok := m.auth(ctx) if !ok { ctx.Return(http.StatusUnauthorized, "invalid token") return } var crossId int64 ctx.Bind("cross_id", &crossId) if err := ctx.BindError(); err != nil { ctx.Return(http.StatusBadRequest, err) return } if setup.AfterInSeconds == 0 { setup.AfterInSeconds = 60 * 60 } m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds) } func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) { ret, err := m.routexRepo.Search(crossIds) if err != nil { logger.ERROR("search for route failed: %s with %+v", err, crossIds) ctx.Return(http.StatusInternalServerError, err) return } ctx.Render(ret) } type RoutexInfo struct { InWindow *bool `json:"in_window"` Objects []rmodel.Geomark `json:"objects"` } func (m RouteMap) GetRoutex(ctx rest.Context) { var userId, crossId int64 ctx.Bind("cross_id", &crossId) ctx.Bind("user_id", &userId) if err := ctx.BindError(); err != nil { ctx.Return(http.StatusBadRequest, err) return } endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId) if err != nil { logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err) ctx.Return(http.StatusInternalServerError, err) return } ret := RoutexInfo{} if endAt != 0 { ret.InWindow = new(bool) *ret.InWindow = endAt >= time.Now().Unix() } query := make(url.Values) query.Set("user_id", fmt.Sprintf("%d", userId)) cross, err := m.platform.FindCross(crossId, query) if err == nil { ret.Objects = m.getObjects(cross, true) } else { logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err) ctx.Return(http.StatusInternalServerError, err) return } ctx.Render(ret) } func (m RouteMap) Stream(ctx rest.StreamContext) { token, ok := m.auth(ctx) if !ok { ctx.Return(http.StatusUnauthorized, "invalid token") return } var forceOpen bool var coordinate string ctx.Bind("force_window_open", &forceOpen) ctx.Bind("coordinate", &coordinate) if err := ctx.BindError(); err != nil { ctx.Return(http.StatusBadRequest, err) return } now := time.Now() endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID)) if err != nil || endAt <= now.Unix() { if !forceOpen { ctx.Return(http.StatusForbidden, "not in window") return } after := 15 * 60 if endAt == 0 { after = 60 * 60 } var openAfter int ctx.BindReset() ctx.Bind("force_window_open", &openAfter) if ctx.BindError() == nil { after = openAfter } endAt = now.Unix() + int64(after) m.switchWindow(int64(token.Cross.ID), token.Identity, true, after) } c := make(chan interface{}, 10) m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c) if token.Cross.By.UserID == m.config.Routex.TutorialCreator { m.pubsub.Subscribe(m.tutorialName(), c) } for _, inv := range token.Cross.Exfee.Invitations { m.pubsub.Subscribe(m.identityName(inv.Identity), c) } logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, 
token.Cross.ID) defer func() { logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID) m.pubsub.UnsubscribeAll(c) close(c) }() willEnd := endAt - now.Unix() err = ctx.Render(map[string]interface{}{ "type": "command", "action": "close_after", "args": []interface{}{willEnd}, }) if err != nil { return } toMars := coordinate == "mars" isTutorial := false if token.Cross.By.UserID == m.config.Routex.TutorialCreator { isTutorial = true } hasCreated := false ctx.Return(http.StatusOK) quit := make(chan int) defer func() { close(quit) }() for _, mark := range m.getObjects(token.Cross, toMars) { if isTutorial && !hasCreated && !mark.IsBreadcrumbs() { hasCreated = true } if err := ctx.Render(mark); err != nil { return } } ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout)) if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil { return } lastCheck := now.Unix() for ctx.Ping() == nil { select { case d := <-c: switch data := d.(type) { case rmodel.Geomark: if isTutorial && !hasCreated { if data.Id == m.breadcrumbsId(token.UserId) { locale, by := "", "" for _, i := range token.Cross.Exfee.Invitations { if i.Identity.UserID == token.UserId { locale, by = i.Identity.Locale, i.Identity.Id() break } } tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by) if err != nil { logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err) } else { hasCreated = true if toMars { tutorialMark.ToMars(m.conversion) } err := ctx.Render(tutorialMark) if err != nil { return } } } } if toMars { data.ToMars(m.conversion) } d = data case rmodel.Identity: switch data.Action { case "join": if token.Cross.Exfee.Join(data.Identity) { m.pubsub.Subscribe(m.identityName(data.Identity), c) } case "remove": if token.Cross.Exfee.Remove(data.Identity) { m.pubsub.Unsubscribe(m.identityName(data.Identity), c) } } } ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout)) err := ctx.Render(d) if err != nil { return } case <-time.After(broker.NetworkTimeout): case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second): newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID)) if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() { return } endAt = newEndAt err = ctx.Render(map[string]interface{}{ "type": "command", "action": "close_after", "args": []interface{}{endAt - time.Now().Unix()}, }) if err != nil { return } } if time.Now().Unix()-lastCheck > 60 { lastCheck = time.Now().Unix() newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID)) if err != nil { logger.ERROR("can't set user %d cross %d: %s", token.UserId, token.Cross.ID, err) continue } endAt = newEndAt err = ctx.Render(map[string]interface{}{ "type": "command", "action": "close_after", "args": []interface{}{endAt - time.Now().Unix()}, }) if err != nil { return } } } } func (m RouteMap) Options(ctx rest.Context) { ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain) ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true") ctx.Response().Header().Set("Cache-Control", "no-cache") ctx.Return(http.StatusNoContent) } func (m RouteMap) SendNotification(ctx rest.Context) { token, ok := m.auth(ctx) if !ok { ctx.Return(http.StatusUnauthorized, "invalid token") return } var id string ctx.Bind("id", &id) if err := ctx.BindError(); err != nil { 
		ctx.Return(http.StatusBadRequest, err)
		return
	}
	to := model.FromIdentityId(id)
	var toInvitation *model.Invitation
	for _, inv := range token.Cross.Exfee.Invitations {
		if inv.Identity.Equal(to) {
			toInvitation = &inv
			break
		}
	}
	if toInvitation == nil {
		ctx.Return(http.StatusForbidden, "%s is not attending cross %d", to.Id(), token.Cross.ID)
		return
	}
	to = toInvitation.Identity
	recipients, err := m.platform.GetRecipientsById(to.Id())
	if err != nil {
		ctx.Return(http.StatusInternalServerError, err)
		return
	}

	m.update(int64(token.Cross.ID), token.Identity)

	arg := notifier.RequestArg{
		CrossId: token.Cross.ID,
		From:    token.Identity,
	}
	pushed := false
	for _, recipient := range recipients {
		switch recipient.Provider {
		case "iOS":
			fallthrough
		case "Android":
			arg.To = recipient
			m.sendRequest(arg)
			pushed = true
		}
	}
	if to.Provider == "wechat" {
		if ok, err := m.platform.CheckWechatFollowing(to.ExternalUsername); (err != nil || !ok) && !pushed {
			ctx.Return(http.StatusNotAcceptable, "can't find available provider")
		}
	}
	go func() {
		arg.To = to.ToRecipient()
		m.sendRequest(arg)
		for _, id := range toInvitation.Notifications {
			to := model.FromIdentityId(id)
			arg.To.ExternalUsername, arg.To.Provider = to.ExternalUsername, to.Provider
			m.sendRequest(arg)
		}
	}()
}

func (m *RouteMap) getObjects(cross model.Cross, toMars bool) []rmodel.Geomark {
	isTutorial := false
	if cross.By.UserID == m.config.Routex.TutorialCreator {
		isTutorial = true
	}

	var ret []rmodel.Geomark
	breadcrumbs, err := m.breadcrumbCache.LoadAllCross(int64(cross.ID))
	now := time.Now()
	if isTutorial {
		for _, id := range m.config.TutorialBotUserIds {
			l := m.getTutorialData(now, id, 1)
			if len(l) > 0 {
				breadcrumbs[id] = l[0]
			}
		}
	}
	users := make(map[int64]bool)
	for _, inv := range cross.Exfee.Invitations {
		users[inv.Identity.UserID] = true
	}
	if err == nil {
		for userId, l := range breadcrumbs {
			if !users[userId] {
				if err := m.breadcrumbCache.RemoveCross(userId, int64(cross.ID)); err != nil {
					logger.ERROR("remove user %d cross %d breadcrumb error: %s", userId, cross.ID, err)
				}
				continue
			}
			mark := m.breadcrumbsToGeomark(userId, 1, []rmodel.SimpleLocation{l})
			if toMars {
				mark.ToMars(m.conversion)
			}
			ret = append(ret, mark)
		}
	} else {
		logger.ERROR("can't get current breadcrumb of cross %d: %s", cross.ID, err)
	}

	marks, err := m.getGeomarks_(cross, toMars)
	if err == nil {
		ret = append(ret, marks...)
	} else {
		logger.ERROR("can't get route of cross %d: %s", cross.ID, err)
	}

	return ret
}

func (m *RouteMap) sendRequest(arg notifier.RequestArg) {
	body, err := json.Marshal(arg)
	if err != nil {
		logger.ERROR("can't marshal: %s with %+v", err, arg)
		return
	}
	url := fmt.Sprintf("http://%s:%d/v3/notifier/routex/request", m.config.ExfeService.Addr, m.config.ExfeService.Port)
	resp, err := broker.HttpResponse(broker.Http("POST", url, "application/json", body))
	if err != nil {
		logger.ERROR("post %s error: %s with %#v", url, err, string(body))
		return
	}
	resp.Close()
}

func (m RouteMap) switchWindow(crossId int64, identity model.Identity, save bool, afterInSeconds int) {
	m.update(crossId, identity)
	if save {
		if err := m.breadcrumbsRepo.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil
		if err := m.breadcrumbCache.EnableCross(identity.UserID, crossId, afterInSeconds); err != nil {
			logger.ERROR("set user %d enable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
		}
	} else {
		if err := m.breadcrumbsRepo.DisableCross(identity.UserID, crossId); err != nil {
			logger.ERROR("set user %d disable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err)
		}
		if err := m.breadcrumbCache.DisableCross(identity.UserID, crossId); err != nil {
			logger.ERROR("set user %d disable cross %d breadcrumb cache failed: %s", identity.UserID, crossId, err)
		}
	}
}

func (m RouteMap) update(crossId int64, by model.Identity) {
	if err := m.routexRepo.Update(crossId); err != nil {
		logger.ERROR("update routex cross %d error: %s", crossId, err)
	}
	cross := make(map[string]interface{})
	cross["widgets"] = []map[string]string{
		{"type": "routex"},
	}
	m.platform.BotCrossUpdate("cross_id", fmt.Sprintf("%d", crossId), cross, by)
}

func (m *RouteMap) auth(ctx rest.Context) (rmodel.Token, bool) {
	ctx.Response().Header().Set("Access-Control-Allow-Origin", m.config.AccessDomain)
	ctx.Response().Header().Set("Access-Control-Allow-Credentials", "true")
	ctx.Response().Header().Set("Cache-Control", "no-cache")

	defer ctx.BindReset()
	var token rmodel.Token
	authData := ctx.Request().Header.Get("Exfe-Auth-Data")
	// if authData == "" {
	// 	authData = `{"token_type":"user_token","user_id":475,"signin_time":1374046388,"last_authenticate":1374046388}`
	// }
	if authData != "" {
		if err := json.Unmarshal([]byte(authData), &token); err != nil {
			return token, false
		}
	}

	var crossIdFlag bool
	ctx.Bind("cross_id", &crossIdFlag)
	if ctx.BindError() != nil || !crossIdFlag {
		if token.TokenType == "user_token" {
			return token, true
		}
		return token, false
	}

	var crossId int64
	ctx.Bind("cross_id", &crossId)
	if err := ctx.BindError(); err != nil {
		return token, false
	}

	query := make(url.Values)
	switch token.TokenType {
	case "user_token":
		query.Set("user_id", fmt.Sprintf("%d", token.UserId))
	case "cross_access_token":
		if int64(token.CrossId) != crossId {
			return token, false
		}
	default:
		return token, false
	}

	var err error
	if token.Cross, err = m.platform.FindCross(int64(crossId), query); err != nil {
		return token, false
	}
	for _, inv := range token.Cross.Exfee.Invitations {
		switch token.TokenType {
		case "cross_access_token":
			if inv.Identity.ID == token.IdentityId {
				token.UserId = inv.Identity.UserID
				token.Identity = inv.Identity
				return token, true
			}
		case "user_token":
			if inv.Identity.UserID == token.UserId {
				token.Identity = inv.Identity
				return token, true
			}
		}
	}
	return token, false
}

func (m RouteMap) publicName(crossId int64) string {
	return fmt.Sprintf("routex:cross_%d", crossId)
}

func (m RouteMap) tutorialName() string {
	return "routex:tutorial:data"
}

func (m RouteMap) identityName(identity model.Identity) string {
	return fmt.Sprintf("routex:identity:%s", identity.Id())
}

func (m RouteMap) tutorialGenerator() {
	for {
		select {
		case <-m.quit:
			return
		case <-time.After(time.Second * 10):
			now := time.Now()
			for userId := range m.tutorialDatas {
				positions := m.getTutorialData(now, userId, 1)
				if len(positions) == 0 {
					continue
				}
				mark := m.breadcrumbsToGeomark(userId, 1, positions)
				m.pubsub.Publish(m.tutorialName(), mark)
			}
		}
	}
}
{ logger.ERROR("set user %d enable cross %d breadcrumbs repo failed: %s", identity.UserID, crossId, err) }
conditional_block