Dataset schema (column, type, min, max; string columns report value lengths):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |
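For orientation, here is a minimal sketch of how a row can be inspected with the Hugging Face `datasets` library. The dataset identifier used below is a placeholder, not the real name. Note that `CommentToCodeRatio` appears to be `CountLineComment / CountLineCode`: for the three complete sample rows below, 280/314 ≈ 0.89, 30/57 ≈ 0.53 and 29/35 ≈ 0.83, matching the stored values, and the sketch checks that relationship.

```python
# Minimal sketch; "org/python-class-metrics" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("org/python-class-metrics", split="train")
row = ds[0]

print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])  # start of the class source

# CommentToCodeRatio looks like comment lines divided by code lines
if row["CountLineCode"]:
    ratio = row["CountLineComment"] / row["CountLineCode"]
    assert abs(ratio - row["CommentToCodeRatio"]) < 0.01
```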
Row 4,100 | repository_name: Alignak-monitoring/alignak | file_path: Alignak-monitoring_alignak/alignak/objects/item.py | class_name: alignak.objects.item.Item

human_written_code:
class Item(AlignakObject):
"""Class to manage an item
An Item is the base of many objects of Alignak. So it defines properties that are common
to all the objects:
- name
- register: it is a real object (True) or a template definition (False)
- imported_from: source configuration file or backend
- use: templates which this object inherits from
- definition_order: priority if the same object is defined several times
- tags: the information tags attached to an object
Note: the tags are automatically set on an object when it uses some templates.
And some configuration parsing information:
- conf_is_correct: whether configuration is correct or not
- configuration_warnings: list of configuration parsing warning log
- configuration_errors: list of configuration parsing error log
And some common functions.
"""
properties = AlignakObject.properties.copy()
properties.update({
'name':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'imported_from':
StringProp(default=u'alignak-self', fill_brok=[FULL_STATUS]),
'use':
ListProp(default=[], split_on_comma=True, fill_brok=[FULL_STATUS]),
'definition_order':
IntegerProp(default=100),
'register':
BoolProp(default=True)
})
running_properties = {
# All errors and warnings raised during the configuration parsing
# and that will raise real warnings/errors during the configuration correctness check
'conf_is_correct':
BoolProp(default=True),
'configuration_warnings':
ListProp(default=[]),
'configuration_errors':
ListProp(default=[]),
# We save all the templates we were asked to load from
'tags':
ListProp(default=[], fill_brok=[FULL_STATUS]),
# used by host, service and contact
# todo: conceptually this should be moved to the SchedulingItem and Contact objects...
'downtimes':
DictProp(default={}, fill_brok=[FULL_STATUS],
retention=True, retention_preparation=dict_to_serialized_dict),
}
macros = {
}
my_type = ''
my_name_property = "name"
ok_up = ''
def __init__(self, params, parsing=True): # pylint: disable=too-many-branches
if params is None:
params = {}
# Assuming a default correct configuration
self.conf_is_correct = True
self.configuration_warnings = []
self.configuration_errors = []
# pylint: disable=too-many-branches
if not parsing:
# Deserialize an existing object
# todo: Why not initializing the running properties in this case?
super(Item, self).__init__(params, parsing)
logger.debug("Restore %s: %s (%s)", self.__class__.my_type, self.uuid, self.get_name())
return
super(Item, self).__init__(params, parsing)
# The new Alignak object unique identifier is set by the base AlignakObject class...
logger.debug("New %s: %s", self.__class__.my_type, self.uuid)
# For the unique name identifier - the first thing is to set the item name
# This is because params is an unordered dictionary, so it is better to get
# the item name as soon as possible
name_property = getattr(self.__class__, "my_name_property", None)
if not name_property: # pragma: no cover, never called
raise "Missing my_name_property in class: %s" % self.__class__.inner_class
if not hasattr(self, name_property) \
and name_property in params \
and name_property in self.properties:
setattr(self, name_property,
self.properties[name_property].pythonize(params[name_property]))
logger.debug("Item %s: %s, named: %s", self.__class__.my_type, self.uuid, self.get_name())
# For importation source
if not hasattr(self, 'imported_from'):
self.imported_from = u'alignak-self'
# For custom variables
if not hasattr(self, 'customs'):
self.customs = {}
# For values with a +
if not hasattr(self, 'plus'):
self.plus = {}
if not hasattr(self, 'old_properties'):
self.old_properties = {}
self.init_running_properties()
# A value whose first character is '+' creates a new 'plus' entry
# A key starting with '_' creates a new custom entry in UPPER case
for key in params:
# We want to create instances with the proper type.
# Here we've just parsed the config files, so everything is a string or a list.
# We use the pythonize method to get the proper type.
try:
if key in self.properties:
val = self.properties[key].pythonize(params[key])
elif key in self.running_properties:
self.add_warning("using the running property '%s' in the configuration. "
"This is accepted but not recommended!" % key)
val = self.running_properties[key].pythonize(params[key])
elif hasattr(self, 'old_properties') and key in self.old_properties:
self.add_warning("using an old Nagios property '%s' in the configuration. "
"This is accepted but not recommended!" % key)
val = self.properties[self.old_properties[key]].pythonize(params[key])
elif key.startswith('_'):  # custom macro, no need to detect anything here
macro = params[key]
# If it's a string, directly use this
if isinstance(macro, string_types):
val = macro
# a list for a custom macro is not managed (conceptually invalid)
# so take the first defined
elif isinstance(macro, list) and macro:
val = macro[0]
# neither a string nor a non-empty list: fall back to the string representation
else:
# For #972, a debug log is enough for such information,
# a configuration warning is too much
val = "%s" % macro
logger.debug("%s, set the no-string macro property '%s' as '%s'",
self.get_full_name(), key, val)
# After this, a macro always contains a string value!
else:
# logger.debug("Guessing the property '%s' type because it "
# "is not in %s object properties", key, self.__class__.__name__)
val = ToGuessProp().pythonize(params[key])
logger.debug("Set the property '%s' type as %s", key, type(val))
except (PythonizeError, AttributeError, ValueError, TypeError) as exp:
self.add_error("Error while pythonizing parameter '%s': %s" % (key, exp))
continue
# checks for attribute value special syntax (+ or _)
# we can have '+param' or ['+template1' , 'template2']
if isinstance(val, string_types) and len(val) >= 1 and val[0] == '+':
self.add_error("A + value for a single string (%s) is not handled" % key)
continue
if (isinstance(val, list) and
len(val) >= 1 and
isinstance(val[0], string_types) and
len(val[0]) >= 1 and
val[0][0] == '+'):
# We manage a list property whose first element is a string that starts with +
val[0] = val[0][1:]
self.plus[key] = val # we remove the +
elif key[0] == "_":
# Except for some specific configuration variables
# _dist prefixed variables are reserved for Alignak directories
if not key.startswith('_dist'):
custom_name = key.upper()
self.customs[custom_name] = val
else:
setattr(self, key, val)
# Change Nagios2 names to Nagios3 ones (before using them)
self.old_properties_names_to_new()
def __str__(self): # pragma: no cover
return '<%s name=%s />' % (self.__class__.__name__, self.get_name())
__repr__ = __str__
def init_running_properties(self):
"""
Initialize the running_properties.
Each instance has its own copy of the properties.
:return: None
"""
for prop, entry in list(self.__class__.running_properties.items()):
val = entry.default
# Make a copy of the value for complex iterable types
# As such, each instance has its own copy and not a simple reference
setattr(self, prop, copy(val) if isinstance(val, (set, list, dict)) else val)
def copy(self):
"""
Get a copy of this item but with a new id
:return: copy of this object with a new id
:rtype: object
"""
# New dummy item with its own running properties
copied_item = self.__class__({})
# Now, copy the properties
for prop in self.__class__.properties:
if prop in ['uuid']:
continue
val = getattr(self, prop, None)
if val is not None:
setattr(copied_item, prop, val)
# Also copy some running properties
# The custom variables
if hasattr(self, "customs"):
copied_item.customs = copy(self.customs)
# The tags
if hasattr(self, "tags"):
copied_item.tags = copy(self.tags)
# The templates
if hasattr(self, "templates"):
copied_item.templates = copy(self.templates)
return copied_item
def clean(self):
"""
Clean properties only needed for initialization and configuration
:return: None
"""
for prop in ['imported_from', 'use', 'plus',
'definition_order', 'valid', 'conf_is_correct']:
try:
delattr(self, prop)
except AttributeError:
pass
for prop in ['configuration_warnings', 'configuration_errors']:
try:
if hasattr(self, prop) and not getattr(self, prop):
delattr(self, prop)
except AttributeError:
pass
def get_name(self, index=False):
"""
Get the name of the item
For a template, returns the `name` attribute
For an item, return the attribute pointed to by the configured
`my_name_property` of the class if it is defined. Else, returns the `name`
property.
:param index: indicates if we are indexing the objects during the parsing phase
:type index: bool
:return: the object name string
:rtype: str
"""
name_property = getattr(self.__class__, "my_name_property", None)
if not name_property: # pragma: no cover, never called
raise "Missing my_name_property in class: %s" % self.__class__.inner_class
if index and hasattr(self.__class__, "my_index_property"):
# May be a specific property for indexing the items (eg. services...)
name_property = getattr(self.__class__, "my_index_property")
if self.is_a_template():
return getattr(self, 'name', getattr(self, name_property, 'Unnamed'))
if name_property:
return getattr(self, name_property, 'Unnamed')
return getattr(self, 'name', 'Unnamed')
def get_full_name(self):
"""Get the full name (eg. for services or complex objects)
Default is to get the current name
:return: the object full name string
:rtype: str
"""
return self.get_name()
def _get_name(self):
"""Get the name of the object
:return: the object name string
:rtype: str
"""
return self.get_name()
def is_a_template(self):
"""
Check if this object is a template
:return: True if is a template, else False
:rtype: bool
"""
return not getattr(self, "register", True)
def serialize(self, no_json=True, printing=False):
"""This function serializes into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply exports the attributes declared in the
properties dictionary and the running_properties of the object.
Note that no json dump happens in this method. If json dump must be done,
it will be elsewhere!
:return: Dictionary containing key and value from properties and running_properties
:rtype: dict
"""
# uuid is not in *_properties
res = {
'uuid': self.uuid
}
for prop in self.__class__.properties:
try:
res[prop] = getattr(self, prop)
if hasattr(res[prop], "serialize"):
res[prop] = {
'__sys_python_module__': "%s.%s" % (res[prop].__class__.__module__,
res[prop].__class__.__name__),
'content': res[prop].serialize(no_json=no_json, printing=printing)
}
if res[prop] is NONE_OBJECT:
print("%s / %s is NONE" % (self, prop))
res[prop] = "None"
except AttributeError:
pass
for prop in self.__class__.running_properties:
try:
res[prop] = getattr(self, prop)
if hasattr(res[prop], "serialize"):
res[prop] = {
'__sys_python_module__': "%s.%s" % (res[prop].__class__.__module__,
res[prop].__class__.__name__),
'content': res[prop].serialize(no_json=no_json, printing=printing)
}
if res[prop] is NONE_OBJECT:
print("%s / %s is NONE" % (self, prop))
res[prop] = "None"
except AttributeError:
pass
# if self.my_type in ['host']:
# print("---Serialized %s: %s" % (self.my_type, res))
return res
@classmethod
def load_global_conf(cls, global_configuration):
"""
Apply global Alignak configuration.
Some objects inherit some properties from the global configuration if they do not
define their own value. E.g. the global 'accept_passive_service_checks' is inherited
by the services as 'accept_passive_checks'
:param cls: the class whose properties will be updated
:type cls: type
:param global_configuration: the global configuration object
:type global_configuration: alignak.objects.config.Config
:return: None
"""
logger.debug("Propagate global parameter for %s:", cls)
for prop, entry in global_configuration.properties.items():
# Only consider the managed properties that have a class_inherit clause
if not entry.managed or not getattr(entry, 'class_inherit'):
continue
for (cls_dest, change_name) in entry.class_inherit:
if cls_dest == cls: # ok, we've got something to get
value = getattr(global_configuration, prop)
logger.debug("- global parameter %s=%s -> %s=%s",
prop, getattr(global_configuration, prop),
change_name, value)
if change_name is None:
setattr(cls, prop, value)
else:
setattr(cls, change_name, value)
def get_templates(self):
"""
Get the list of templates this object uses
:return: list of templates
:rtype: list
"""
use = getattr(self, 'use', '')
if isinstance(use, list):
return [n.strip() for n in use if n.strip()]
return [n.strip() for n in use.split(',') if n.strip()]
def has_plus(self, prop):
"""
Check if self.plus has this property
:param prop: property to check
:type prop: str
:return: True if self.plus has this property, otherwise False
:rtype: bool
"""
return prop in self.plus
# try:
# self.plus[prop]
# except KeyError:
# return False
# return True
def get_all_plus_and_delete(self):
"""
Get a copy of all self.plus entries, delete the originals and return the copies
:return: dict of the former self.plus entries
:rtype: dict
"""
res = {}
props = list(self.plus.keys()) # we delete entries, so no for ... in ...
for prop in props:
res[prop] = self.get_plus_and_delete(prop)
return res
def get_plus_and_delete(self, prop):
"""
Get the value of a property stored in self.plus, delete it from self.plus and
return the value
:param prop: a property
:type prop: str
:return: return the value of the property
:rtype: str
"""
val = self.plus[prop]
del self.plus[prop]
return val
def add_error(self, message):
"""Add a message in the configuration errors list so we can print them
all in one place
Set the object configuration as not correct
:param message: error message or a list of messages
:type message: str | list
:return: None
"""
if not hasattr(self, 'configuration_errors'):
self.configuration_errors = []
if not isinstance(message, list):
message = [message]
for txt in message:
self.configuration_errors.append("[{}::{}] {}".format(self.my_type,
self.get_name(), txt))
self.conf_is_correct = False
def add_warning(self, message):
"""Add a message in the configuration warnings list so we can print them
all in one place
:param message: warning message
:type message: str
:return: None
"""
if not hasattr(self, 'configuration_warnings'):
self.configuration_warnings = []
if not isinstance(message, list):
message = [message]
for txt in message:
self.configuration_warnings.append("[{}::{}] {}".format(self.my_type,
self.get_name(), txt))
def is_correct(self):
"""
Check if this object is correct
This function:
- ignores the special_properties if some exist
- checks if the required properties are defined,
:return: True if it's correct, otherwise False
:rtype: bool
"""
for prop, entry in list(self.__class__.properties.items()):
if prop in getattr(self, 'special_properties', []):
continue
if not hasattr(self, prop) and entry.required:
self.add_error("%s required property is missing" % prop)
return self.conf_is_correct
def old_properties_names_to_new(self):
"""
This function is used by services and hosts to transform Nagios2 parameters into
Nagios3 ones, like normal_check_interval to check_interval.
Classes that need such a transformation define an old_properties dictionary.
:return: None
"""
old_properties = getattr(self.__class__, "old_properties", {})
for old_name, new_name in list(old_properties.items()):
# Ok, if we got old_name and NO new name,
# we switch the name
if hasattr(self, old_name) and not hasattr(self, new_name):
value = getattr(self, old_name)
setattr(self, new_name, value)
delattr(self, old_name)
def add_downtime(self, downtime):
"""
Add a downtime in this object
:param downtime: a Downtime object
:type downtime: object
:return: None
"""
self.downtimes[downtime.uuid] = downtime
def del_downtime(self, downtime_id):
"""
Delete a downtime in this object
:param downtime_id: id of the downtime to delete
:type downtime_id: int
:return: None
"""
if downtime_id in self.downtimes:
self.downtimes[downtime_id].can_be_deleted = True
del self.downtimes[downtime_id]
def add_comment(self, comment):
"""
Add a comment to this object
:param comment: a Comment object
:type comment: object
:return: None
"""
self.comments[comment.uuid] = comment
def del_comment(self, comment_id):
"""
Delete a comment in this object
:param comment_id: id of the comment to delete
:type comment_id: int
:return: None
"""
if comment_id in self.comments:
del self.comments[comment_id]
def get_property_value_for_brok(self, prop, tab):
"""
Get the value of an object property, apply its brok_transformation if any, and return it
:param prop: property name
:type prop: str
:param tab: object with all properties of an object
:type tab: object
:return: value of the property original or brok converted
:rtype: str
"""
entry = tab[prop]
# Get the current value, or the default if needed
value = getattr(self, prop, entry.default)
# Apply the brok_transformation if needed
# Look if we must preprocess the value first
pre_op = entry.brok_transformation
if pre_op is not None:
value = pre_op(self, value)
return value
def fill_data_brok_from(self, data, brok_type):
"""
Fill the 'data' dict with the properties of this object whose fill_brok list
contains the 'brok_type' parameter
:param data: dict to fill
:type data: dict
:param brok_type: the brok type
:type brok_type: str
:return: None
"""
cls = self.__class__
# Configuration properties
for prop, entry in list(cls.properties.items()):
# Is this property intended for broking?
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.properties)
# And the running properties
if hasattr(cls, 'running_properties'):
# We've got prop in running_properties too
for prop, entry in list(cls.running_properties.items()):
# if 'fill_brok' in cls.running_properties[prop]:
if brok_type in entry.fill_brok:
data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
def get_initial_status_brok(self, extra=None):
"""
Create an initial status brok
:param extra: some extra information to be added in the brok data
:type extra: dict
:return: Brok object
:rtype: alignak.Brok
"""
data = {'uuid': self.uuid}
self.fill_data_brok_from(data, FULL_STATUS)
if extra:
data.update(extra)
if self.is_a_template():
return Brok({'type': 'initial_' + self.my_type + '_template_status', 'data': data})
return Brok({'type': 'initial_' + self.my_type + '_status', 'data': data})
def get_new_brok(self, name):
"""
Create a new item brok
:param name: name of the new object
:type name: str
:return: Brok object
:rtype: alignak.Brok
"""
return Brok({'type': 'new_' + self.my_type, 'data': {'uuid': self.uuid, 'name': name}})
def get_update_status_brok(self):
"""
Create an update item brok
:return: Brok object
:rtype: alignak.Brok
"""
data = {'uuid': self.uuid}
self.fill_data_brok_from(data, FULL_STATUS)
return Brok({'type': 'update_' + self.my_type + '_status', 'data': data})
def get_check_result_brok(self):
"""
Create check_result brok
:return: Brok object
:rtype: alignak.Brok
"""
# data = {'uuid': self.uuid}
data = {}
self.fill_data_brok_from(data, CHECK_RESULT)
return Brok({'type': self.my_type + '_check_result', 'data': data})
def get_next_schedule_brok(self):
"""
Create next_schedule (next check) brok
:return: Brok object
:rtype: alignak.Brok
"""
data = {'uuid': self.uuid}
self.fill_data_brok_from(data, 'next_schedule')
return Brok({'type': self.my_type + '_next_schedule', 'data': data})
def get_snapshot_brok(self, snap_output, exit_status):
"""
Create snapshot (check_result type) brok
:param snap_output: value of output
:type snap_output: str
:param exit_status: status of exit
:type exit_status: integer
:return: Brok object
:rtype: alignak.Brok
"""
data = {
'uuid': self.uuid,
'snapshot_output': snap_output,
'snapshot_time': int(time.time()),
'snapshot_exit_status': exit_status,
}
self.fill_data_brok_from(data, CHECK_RESULT)
return Brok({'type': self.my_type + '_snapshot', 'data': data})
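The serialize() method above exports every declared property into a plain dict and tags nested serializable objects with their Python class path so the receiving daemon can rebuild them. The following is a stripped-down sketch of that export convention, a simplified illustration rather than the alignak implementation:

```python
# Simplified sketch of the serialize() export convention shown above.
def export(obj, props):
    """Export the declared properties of obj into a plain dict."""
    res = {"uuid": obj.uuid}
    for prop in props:
        try:
            val = getattr(obj, prop)
        except AttributeError:
            continue  # property not set on this instance: skip it
        if hasattr(val, "serialize"):
            # Nested object: record its class path so it can be rebuilt later
            val = {
                "__sys_python_module__": "%s.%s" % (val.__class__.__module__,
                                                    val.__class__.__name__),
                "content": val.serialize(),
            }
        res[prop] = val
    return res

class Dummy:
    uuid = "1234"
    name = "test"

print(export(Dummy(), ["name", "missing"]))  # {'uuid': '1234', 'name': 'test'}
```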
class_skeleton:
class Item(AlignakObject):
'''Class to manage an item
An Item is the base of many objects of Alignak. So it defines properties that are common
to all the objects:
- name
- register: it is a real object (True) or a template definition (False)
- imported_from: source configuration file or backend
- use: templates which this object inherits from
- definition_order: priority if the same object is defined several times
- tags: the information tags attached to an object
Note: the tags are automatically set on an object when it uses some templates.
And some configuration parsing information:
- conf_is_correct: whether configuration is correct or not
- configuration_warnings: list of configuration parsing warning log
- configuration_errors: list of configuration parsing error log
And some common functions.
'''
def __init__(self, params, parsing=True):
pass
def __str__(self):
pass
def init_running_properties(self):
'''
Initialize the running_properties.
Each instance has its own copy of the properties.
:return: None
'''
pass
def copy(self):
'''
Get a copy of this item but with a new id
:return: copy of this object with a new id
:rtype: object
'''
pass
def clean(self):
'''
Clean properties only needed for initialization and configuration
:return: None
'''
pass
def get_name(self, index=False):
'''
Get the name of the item
For a template, returns the `name` attribute
For an item, return the attribute pointed to by the configured
`my_name_property` of the class if it is defined. Else, returns the `name`
property.
:param index: indicates if we are indexing the objects during the parsing phase
:type index: bool
:return: the object name string
:rtype: str
'''
pass
def get_full_name(self):
'''Get the full name (eg. for services or complex objects)
Default is to get the current name
:return: the object full name string
:rtype: str
'''
pass
def _get_name(self):
'''Get the name of the object
:return: the object name string
:rtype: str
'''
pass
def is_a_template(self):
'''
Check if this object is a template
:return: True if is a template, else False
:rtype: bool
'''
pass
def serialize(self, no_json=True, printing=False):
'''This function serializes into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply exports the attributes declared in the
properties dictionary and the running_properties of the object.
Note that no json dump happens in this method. If json dump must be done,
it will be elsewhere!
:return: Dictionary containing key and value from properties and running_properties
:rtype: dict
'''
pass
@classmethod
def load_global_conf(cls, global_configuration):
'''
Apply global Alignak configuration.
Some objects inherit some properties from the global configuration if they do not
define their own value. E.g. the global 'accept_passive_service_checks' is inherited
by the services as 'accept_passive_checks'
:param cls: the class whose properties will be updated
:type cls: type
:param global_configuration: the global configuration object
:type global_configuration: alignak.objects.config.Config
:return: None
'''
pass
def get_templates(self):
'''
Get the list of templates this object uses
:return: list of templates
:rtype: list
'''
pass
def has_plus(self, prop):
'''
Check if self.plus has this property
:param prop: property to check
:type prop: str
:return: True if self.plus has this property, otherwise False
:rtype: bool
'''
pass
def get_all_plus_and_delete(self):
'''
Get a copy of all self.plus entries, delete the originals and return the copies
:return: dict of the former self.plus entries
:rtype: dict
'''
pass
def get_plus_and_delete(self, prop):
'''
Get the value of a property stored in self.plus, delete it from self.plus and
return the value
:param prop: a property
:type prop: str
:return: return the value of the property
:rtype: str
'''
pass
def add_error(self, message):
'''Add a message in the configuration errors list so we can print them
all in one place
Set the object configuration as not correct
:param message: error message or a list of messages
:type message: str | list
:return: None
'''
pass
def add_warning(self, message):
'''Add a message in the configuration warnings list so we can print them
all in one place
:param message: warning message
:type message: str
:return: None
'''
pass
def is_correct(self):
'''
Check if this object is correct
This function:
- ignores the special_properties if some exist
- checks if the required properties are defined,
:return: True if it's correct, otherwise False
:rtype: bool
'''
pass
def old_properties_names_to_new(self):
'''
This function is used by services and hosts to transform Nagios2 parameters into
Nagios3 ones, like normal_check_interval to check_interval.
Classes that need such a transformation define an old_properties dictionary.
:return: None
'''
pass
def add_downtime(self, downtime):
'''
Add a downtime in this object
:param downtime: a Downtime object
:type downtime: object
:return: None
'''
pass
def del_downtime(self, downtime_id):
'''
Delete a downtime in this object
:param downtime_id: id of the downtime to delete
:type downtime_id: int
:return: None
'''
pass
def add_comment(self, comment):
'''
Add a comment to this object
:param comment: a Comment object
:type comment: object
:return: None
'''
pass
def del_comment(self, comment_id):
'''
Delete a comment in this object
:param comment_id: id of the comment to delete
:type comment_id: int
:return: None
'''
pass
def get_property_value_for_brok(self, prop, tab):
'''
Get the value of an object property, apply its brok_transformation if any, and return it
:param prop: property name
:type prop: str
:param tab: object with all properties of an object
:type tab: object
:return: value of the property original or brok converted
:rtype: str
'''
pass
def fill_data_brok_from(self, data, brok_type):
'''
Fill the 'data' dict with the properties of this object whose fill_brok list
contains the 'brok_type' parameter
:param data: dict to fill
:type data: dict
:param brok_type: the brok type
:type brok_type: str
:return: None
'''
pass
def get_initial_status_brok(self, extra=None):
'''
Create an initial status brok
:param extra: some extra information to be added in the brok data
:type extra: dict
:return: Brok object
:rtype: alignak.Brok
'''
pass
def get_new_brok(self, name):
'''
Create a new item brok
:param name: name of the new object
:type name: str
:return: Brok object
:rtype: alignak.Brok
'''
pass
def get_update_status_brok(self):
'''
Create an update item brok
:return: Brok object
:rtype: alignak.Brok
'''
pass
def get_check_result_brok(self):
'''
Create check_result brok
:return: Brok object
:rtype: alignak.Brok
'''
pass
def get_next_schedule_brok(self):
'''
Create next_schedule (next check) brok
:return: Brok object
:rtype: alignak.Brok
'''
pass
def get_snapshot_brok(self, snap_output, exit_status):
'''
Create snapshot (check_result type) brok
:param snap_output: value of output
:type snap_output: str
:param exit_status: status of exit
:type exit_status: integer
:return: Brok object
:rtype: alignak.Brok
'''
pass
Metrics: total_program_units=33, total_doc_str=30, AvgCountLine=19, AvgCountLineBlank=2, AvgCountLineCode=9, AvgCountLineComment=8, AvgCyclomatic=3, CommentToCodeRatio=0.89, CountClassBase=1, CountClassCoupled=12, CountClassCoupledModified=3, CountClassDerived=19, CountDeclInstanceMethod=30, CountDeclInstanceVariable=7, CountDeclMethod=31, CountDeclMethodAll=34, CountLine=691, CountLineBlank=105, CountLineCode=314, CountLineCodeDecl=86, CountLineCodeExe=281, CountLineComment=280, CountStmt=250, CountStmtDecl=84, CountStmtExe=218, MaxCyclomatic=21, MaxInheritanceTree=2, MaxNesting=4, SumCyclomatic=104
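Item.__init__ above implements two legacy configuration conventions: a list value whose first element starts with '+' is stored in self.plus for later template merging (with the '+' stripped), and keys starting with '_' (except the reserved '_dist' prefix) become upper-cased custom variables. Here is a stripped-down, self-contained sketch of that dispatch, an illustration rather than the alignak code itself:

```python
# Simplified sketch of the '+' / '_' parameter dispatch in Item.__init__.
def dispatch_param(key, val, plus, customs, attrs):
    """Route one parsed parameter the way Item.__init__ does."""
    if isinstance(val, list) and val and isinstance(val[0], str) \
            and val[0].startswith('+'):
        # '+' on the first element: keep for template merging, strip the '+'
        plus[key] = [val[0][1:]] + val[1:]
    elif key.startswith('_') and not key.startswith('_dist'):
        # '_' prefix: custom variable, stored upper-cased
        customs[key.upper()] = val
    else:
        attrs[key] = val

plus, customs, attrs = {}, {}, {}
dispatch_param('use', ['+tpl1', 'tpl2'], plus, customs, attrs)
dispatch_param('_snmp_community', 'public', plus, customs, attrs)
dispatch_param('register', True, plus, customs, attrs)
assert plus == {'use': ['tpl1', 'tpl2']}
assert customs == {'_SNMP_COMMUNITY': 'public'}
assert attrs == {'register': True}
```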
Row 4,101 | repository_name: Alignak-monitoring/alignak | file_path: Alignak-monitoring_alignak/alignak/objects/hostgroup.py | class_name: alignak.objects.hostgroup.Hostgroup

human_written_code:
class Hostgroup(Itemgroup):
"""
Class to manage a group of hosts
A Hostgroup is used to manage a group of hosts
"""
my_type = 'hostgroup'
my_name_property = "%s_name" % my_type
members_property = "members"
group_members_property = "%s_members" % my_type
properties = Itemgroup.properties.copy()
properties.update({
'hostgroup_name':
StringProp(fill_brok=[FULL_STATUS]),
'alias':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'hostgroup_members':
ListProp(default=[], fill_brok=[FULL_STATUS], merging='join', split_on_comma=True),
'notes':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'notes_url':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'action_url':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
# Realm stuff
'realm':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
})
# properties set only for running purpose
running_properties = Itemgroup.running_properties.copy()
running_properties.update({
# Realm stuff
'realm_name':
StringProp(default=u''),
'got_default_realm':
BoolProp(default=False),
})
macros = {
'HOSTGROUPNAME': 'hostgroup_name',
'HOSTGROUPALIAS': 'alias',
'HOSTGROUPMEMBERS': 'members',
'HOSTGROUPGROUPMEMBERS': 'hostgroup_members',
'HOSTGROUPNOTES': 'notes',
'HOSTGROUPNOTESURL': 'notes_url',
'HOSTGROUPACTIONURL': 'action_url',
'HOSTGROUPREALM': 'realm_name'
}
def get_hosts(self):
"""Get the hosts of the group
:return: list of hosts
:rtype: list
"""
return super(Hostgroup, self).get_members()
def get_hostgroup_members(self):
"""Get the groups members of the group
:return: list of hosts
:rtype: list
"""
return getattr(self, 'hostgroup_members', [])
def get_hosts_by_explosion(self, hostgroups):
# pylint: disable=access-member-before-definition
"""
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
"""
# First we tag the hg so it will not be exploded again
# if one of its sons already called it
self.already_exploded = True
# Now the recursive part
# rec_tag is set to False for every HG before we explode it,
# so if it is True here there must be a loop in the
# hostgroup calls... not GOOD!
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
# Ok, not a loop, we tag it and continue
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hostgroup = hostgroups.find_by_name(hg_mbr.strip())
if hostgroup is not None:
value = hostgroup.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_members(value)
return self.get_hosts()
class_skeleton:
class Hostgroup(Itemgroup):
'''
Class to manage a group of hosts
A Hostgroup is used to manage a group of hosts
'''
def get_hosts(self):
'''Get the hosts of the group
:return: list of hosts
:rtype: list
'''
pass
def get_hostgroup_members(self):
'''Get the groups members of the group
:return: list of hosts
:rtype: list
'''
pass
def get_hosts_by_explosion(self, hostgroups):
'''
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
'''
pass
Metrics: total_program_units=4, total_doc_str=4, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=6, AvgCountLineComment=8, AvgCyclomatic=2, CommentToCodeRatio=0.53, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=2, CountDeclMethod=3, CountDeclMethodAll=47, CountLine=102, CountLineBlank=15, CountLineCode=57, CountLineCodeDecl=17, CountLineCodeExe=53, CountLineComment=30, CountStmt=28, CountStmtDecl=17, CountStmtExe=24, MaxCyclomatic=5, MaxInheritanceTree=4, MaxNesting=3, SumCyclomatic=7
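get_hosts_by_explosion above guards a recursive expansion of nested hostgroups with a rec_tag flag so that definition loops are reported instead of recursing forever. A self-contained sketch of the same pattern, using plain dicts instead of alignak objects:

```python
# Sketch of the rec_tag loop-detection pattern; plain dicts stand in
# for alignak Hostgroup objects.
def explode(name, groups, visiting=None):
    """Return all hosts of a group, following nested group members."""
    visiting = visiting or set()
    if name in visiting:                  # same role as rec_tag being True
        print("loop in hostgroup definition: %s" % name)
        return set(groups[name]["members"])
    visiting.add(name)                    # same role as rec_tag = True
    hosts = set(groups[name]["members"])
    for sub in groups[name]["group_members"]:
        hosts |= explode(sub, groups, visiting)
    return hosts

groups = {
    "all": {"members": ["gateway"], "group_members": ["web", "db"]},
    "web": {"members": ["web01", "web02"], "group_members": []},
    "db":  {"members": ["db01"], "group_members": ["all"]},  # deliberate loop
}
print(sorted(explode("all", groups)))  # loop is reported, not fatal
```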
Row 4,102 | repository_name: Alignak-monitoring/alignak | file_path: Alignak-monitoring_alignak/alignak/objects/commandcallitem.py | class_name: alignak.objects.commandcallitem.CommandCallItems

human_written_code:
class CommandCallItems(Items):
"""This class provide simple methods to linkify CommandCall object.
Only object that have CommandCall attribute need those methods (so no need to define it in Item)
"""
def linkify_with_commands(self, commands, prop, is_a_list=False):
"""
Link a command to a property (check_command for example)
:param is_a_list: True if the property contains a list of commands
:type is_a_list: bool
:param commands: commands object, the list of all known commands
:type commands: alignak.objects.command.Commands
:param prop: property name
:type prop: str
:return: None
"""
for item in self:
if not getattr(item, prop, None):
# Set/force a non-existing command
setattr(item, prop, None)
continue
command_name = getattr(item, prop, None)
if not command_name:
continue
if not is_a_list:
# Set a CommandCall for the command
setattr(item, prop, self.create_commandcall(item, commands, command_name))
continue
setattr(item, prop, [])
commands_list = command_name
if not isinstance(commands_list, list):
commands_list = [commands_list]
# commands contains the configured commands list,
# something like: [check-host-alive-parent!up!$HOSTSTATE:test_router_0$]
cmds_list = []
for command_name in commands_list:
cmds_list.append(self.create_commandcall(item, commands, command_name))
setattr(item, prop, cmds_list)
if not is_a_list:
setattr(item, prop, cmds_list[0])
@staticmethod
def create_commandcall(item, commands, command_line):
"""
Create CommandCall object with command
:param item: an item concerned with the command
:type item: alignak.objects.item.Item
:param commands: all commands
:type commands: alignak.objects.command.Commands
:param command_line: a full command line (command and arguments)
:type command_line: str
:return: a commandCall object
:rtype: alignak.objects.commandcallitem.CommandCall
"""
cc = {
'command_line': command_line.strip(),
'commands': commands
}
if hasattr(item, 'enable_environment_macros'):
cc['enable_environment_macros'] = item.enable_environment_macros
if hasattr(item, 'poller_tag'):
cc['poller_tag'] = item.poller_tag
if hasattr(item, 'reactionner_tag'):
cc['reactionner_tag'] = item.reactionner_tag
# Force parsing for object creation
return CommandCall(cc, parsing=True)
class_skeleton:
class CommandCallItems(Items):
'''This class provides simple methods to linkify CommandCall objects.
Only objects that have a CommandCall attribute need these methods (so no need to define them in Item)
'''
def linkify_with_commands(self, commands, prop, is_a_list=False):
'''
Link a command to a property (check_command for example)
:param is_a_list: True if the property contains a list of commands
:type is_a_list: bool
:param commands: commands object, the list of all known commands
:type commands: alignak.objects.command.Commands
:param prop: property name
:type prop: str
:return: None
'''
pass
@staticmethod
def create_commandcall(item, commands, command_line):
'''
Create CommandCall object with command
:param item: an item concerned with the command
:type item: alignak.objects.item.Item
:param commands: all commands
:type commands: alignak.objects.command.Commands
:param command_line: a full command line (command and arguments)
:type command_line: str
:return: a commandCall object
:rtype: alignak.objects.commandcallitem.CommandCall
'''
pass
Metrics: total_program_units=4, total_doc_str=3, AvgCountLine=34, AvgCountLineBlank=5, AvgCountLineCode=17, AvgCountLineComment=13, AvgCyclomatic=6, CommentToCodeRatio=0.83, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=4, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=47, CountLine=76, CountLineBlank=12, CountLineCode=35, CountLineCodeDecl=9, CountLineCodeExe=31, CountLineComment=29, CountStmt=31, CountStmtDecl=8, CountStmtExe=28, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=12
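linkify_with_commands above normalizes a property that may hold either a single command line or a list of them, replacing each raw string with a CommandCall object. A reduced sketch of that normalization, with a stub standing in for the real CommandCall class and plain dicts for the items:

```python
# Reduced sketch of linkify_with_commands; CommandCallStub stands in for
# alignak's CommandCall class.
class CommandCallStub:
    def __init__(self, command_line):
        self.command_line = command_line.strip()

def linkify(items, prop, is_a_list=False):
    for item in items:
        raw = item.get(prop)
        if not raw:
            item[prop] = None  # set/force a non-existing command
            continue
        raw_list = raw if isinstance(raw, list) else [raw]
        calls = [CommandCallStub(line) for line in raw_list]
        item[prop] = calls if is_a_list else calls[0]

items = [{"check_command": "check-host-alive!up"},
         {"event_handler": ["restart!web", "notify!ops"]}]
linkify(items, "check_command")
linkify(items, "event_handler", is_a_list=True)
print(items[0]["check_command"].command_line)             # check-host-alive!up
print([c.command_line for c in items[1]["event_handler"]])
```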
Row 4,103 | repository_name: Alignak-monitoring/alignak | file_path: Alignak-monitoring_alignak/alignak/objects/config.py | class_name: alignak.objects.config.Config

human_written_code:
class Config(Item): # pylint: disable=too-many-public-methods,too-many-instance-attributes
"""Config is the class that reads, loads and manipulates the main Alignak monitored
objects configuration. It reads the Nagios legacy configuration files (cfg files )
and gets all information from these files.
It creates the monitored objects (eg. hosts, contacts, ...), creates links between
them, check them, clean them, and cut them into independent parts.
The main user of this Config class is the Arbiter daemon when it loads the
configuration and dispatches to the other daemons."""
# Next value used for auto generated instance_id
_next_id = 1
cache_path = "objects.cache"
my_type = "config"
my_name_property = "config_name"
"""Configuration properties:
* required: if True, there is no default and the configuration must define the property
* default: if not set, take this value
* pythonize: function used to convert the raw (string) value to the expected type
* class_inherit: (Service, 'blabla'): must set this configuration property to the
Service class with the name blabla
If (Service, None): must set this property to the Service class with the same name
* unused: just to warn the user that the option they use is no longer used in Alignak
* usage_text: if present, printed to explain why the option is no longer useful
---
All the properties with FULL_STATUS in the fill_brok will be included in the
'program_status' and 'update_program_status' broks.
---
"""
properties = {
# Some tuning parameters
# ----------
# When set, this parameter makes the configuration cleaning run once the data
# are ready to be prepared for dispatching to the daemons.
'clean_objects':
BoolProp(default=False),
# When set, this parameter makes the configuration checked for consistency between
# hostgroups and hosts realms. If hosts and their hostgroups do not belong to the
# same realm the configuration is declared as corrupted
'forced_realms_hostgroups':
BoolProp(default=True),
# -----
# Included in the program status brok raised for the scheduler live state
# -----
# Used for the ALIGNAK macro
# Alignak instance name is set as the arbiter name
# if it is not defined in the configuration file
'alignak_name':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'alignak_env':
ListProp(default=[], fill_brok=[FULL_STATUS]),
# Configuration identification - instance id and name
'instance_id':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'config_name':
StringProp(default=u'Main configuration', fill_brok=[FULL_STATUS]),
'program_start':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'last_alive':
IntegerProp(default=0),
'last_log_rotation':
IntegerProp(default=0),
'last_command_check':
IntegerProp(default=0),
'pid':
IntegerProp(default=0),
'is_running':
BoolProp(default=True),
'modified_host_attributes':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'modified_service_attributes':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'daemon_mode':
BoolProp(default=True),
# -----
# Those are not valid parameters ...
# 'passive_host_checks_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# 'passive_service_checks_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# 'active_host_checks_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# 'active_service_checks_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# 'event_handlers_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# 'flap_detection_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# 'notifications_enabled':
# BoolProp(default=True, fill_brok=[FULL_STATUS]),
# Used for the MAINCONFIGFILE, CONFIGFILES and CONFIGBASEDIR macros
# will be set when we will load a file
'config_files':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'main_config_file':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'config_base_dir':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
# # Triggers directory
# 'triggers_dir':
# UnusedProp(text=NOT_MANAGED),
# Packs directory
'packs_dir':
UnusedProp(text=NOT_MANAGED),
# Inner objects cache file for Nagios CGI
'object_cache_file':
UnusedProp(text=NOT_MANAGED),
'precached_object_file':
UnusedProp(text=NOT_MANAGED),
# Unused Nagios configuration parameter
'resource_file':
UnusedProp(text=NOT_MANAGED),
# Unused Nagios configuration parameter
'temp_file':
UnusedProp(text=NOT_MANAGED),
'temp_path':
UnusedProp(text=NOT_MANAGED),
# Inner retention self created module parameter
'status_file':
UnusedProp(text=NO_LONGER_USED),
'status_update_interval':
UnusedProp(text=NO_LONGER_USED),
# Enable the notifications
'enable_notifications':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None), (Contact, None)]),
# Service checks
'execute_service_checks':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Service, 'execute_checks')]),
'accept_passive_service_checks':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Service, 'accept_passive_checks')]),
# Host checks
'execute_host_checks':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, 'execute_checks')]),
'accept_passive_host_checks':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, 'accept_passive_checks')]),
# Accept passive checks for unknown host/service
'accept_passive_unknown_check_results':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
# Enable event handlers
'enable_event_handlers':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
# Inner log self created module parameter
'log_file':
UnusedProp(text=NOT_MANAGED),
'log_rotation_method':
UnusedProp(text=NOT_MANAGED),
'log_archive_path':
UnusedProp(text=NOT_MANAGED),
# Inner external commands self created module parameter
'check_external_commands':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'command_check_interval':
UnusedProp(text=u'Alignak will always check for external commands. '
u'This configuration value is useless.'),
'command_file':
StringProp(default=u''),
'external_command_buffer_slots':
UnusedProp(text=u'Alignak do not limit the external commands slot.'),
# Application updates checks
'check_for_updates':
UnusedProp(text=u'network administrators will never allow such communication between '
u'server and the external world. Use your distribution packet manager '
u'to know if updates are available or go to the '
u'http://www.github.com/Alignak-monitoring/alignak website instead.'),
'bare_update_checks':
UnusedProp(text=None),
# -----
# Inner state retention module parameters
'retain_state_information':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'state_retention_dir':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'state_retention_file':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'retention_update_interval':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'use_retained_program_state':
UnusedProp(text=NOT_INTERESTING),
'use_retained_scheduling_info':
UnusedProp(text=NOT_INTERESTING),
'retained_host_attribute_mask':
UnusedProp(text=NOT_INTERESTING),
'retained_service_attribute_mask':
UnusedProp(text=NOT_INTERESTING),
'retained_process_host_attribute_mask':
UnusedProp(text=NOT_INTERESTING),
'retained_process_service_attribute_mask':
UnusedProp(text=NOT_INTERESTING),
'retained_contact_host_attribute_mask':
UnusedProp(text=NOT_INTERESTING),
'retained_contact_service_attribute_mask':
UnusedProp(text=NOT_INTERESTING),
# -----
# Inner syslog self created module parameters
'use_syslog':
BoolProp(default=False),
# Monitoring logs (Alignak events log) configuration
'events_date_format':
StringProp(default='%Y-%m-%d %H:%M:%S', fill_brok=[FULL_STATUS]),
'events_log_count':
IntegerProp(default=100, fill_brok=[FULL_STATUS]),
'log_filename':
StringProp(default='alignak-events.log'),
# Override log level - default is to not change anything
'log_level':
StringProp(default=''),
'log_rotation_when':
StringProp(default='midnight'),
'log_rotation_interval':
IntegerProp(default=1),
'log_rotation_count':
IntegerProp(default=365),
'log_format':
StringProp(default='[%(my_date)s] %(levelname)s: %(message)s'),
'log_date':
StringProp(default='%Y-%m-%d %H:%M:%S'),
'log_notifications':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_alerts':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_acknowledgements':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_downtimes':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_event_handlers':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_snapshots':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_flappings':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_initial_states':
BoolProp(default=False, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_external_commands':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_passive_checks':
BoolProp(default=False, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_active_checks':
BoolProp(default=False, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'log_alignak_checks':
BoolProp(default=False, fill_brok=[FULL_STATUS]),
# Global event handlers
'global_host_event_handler':
StringProp(default='', fill_brok=[FULL_STATUS],
brok_transformation=to_name_if_possible,
class_inherit=[(Host, 'global_event_handler')]),
'global_service_event_handler':
StringProp(default='', fill_brok=[FULL_STATUS],
brok_transformation=to_name_if_possible,
class_inherit=[(Service, 'global_event_handler')]),
'sleep_time':
UnusedProp(text=u'This deprecated option is useless in the alignak way of doing.'),
'service_inter_check_delay_method':
UnusedProp(text=u'This option is useless in the Alignak scheduling. '
'The only way is the smart way.'),
'max_service_check_spread':
IntegerProp(default=5, class_inherit=[(Service, 'max_check_spread')]),
'service_interleave_factor':
UnusedProp(text=u'This option is useless in the Alignak scheduling '
'because it use a random distribution for initial checks.'),
'max_concurrent_checks':
UnusedProp(text=u'Limiting the max concurrent checks is not helpful '
'to got a good running monitoring server.'),
'check_result_reaper_frequency':
UnusedProp(text=u'Alignak do not use reaper process.'),
'max_check_result_reaper_time':
UnusedProp(text=u'Alignak do not use reaper process.'),
'check_result_path':
UnusedProp(text=u'Alignak use in memory returns, not check results on flat file.'),
'max_check_result_file_age':
UnusedProp(text=u'Alignak do not use flat file check resultfiles.'),
'host_inter_check_delay_method':
UnusedProp(text=u'This option is unused in the Alignak scheduling because distribution '
'of the initial check is a random one.'),
'max_host_check_spread':
IntegerProp(default=5, fill_brok=[FULL_STATUS],
class_inherit=[(Host, 'max_check_spread')]),
'interval_length':
IntegerProp(default=60, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
# Todo: not used anywhere in the source code
'auto_reschedule_checks':
BoolProp(managed=False, default=True),
# Todo: not used anywhere in the source code
'auto_rescheduling_interval':
IntegerProp(managed=False, default=1),
# Todo: not used anywhere in the source code
'auto_rescheduling_window':
IntegerProp(managed=False, default=180),
'translate_passive_host_checks':
UnusedProp(text=u'Alignak passive checks management makes this parameter unuseful.'),
'passive_host_checks_are_soft':
UnusedProp(text=u'Alignak passive checks management makes this parameter unuseful.'),
# Todo: not used anywhere in the source code
'enable_predictive_host_dependency_checks':
BoolProp(managed=False,
default=True,
class_inherit=[(Host, 'enable_predictive_dependency_checks')]),
# Todo: not used anywhere in the source code
'enable_predictive_service_dependency_checks':
BoolProp(managed=False, default=True),
# Todo: not used anywhere in the source code
'cached_host_check_horizon':
IntegerProp(default=0, class_inherit=[(Host, 'cached_check_horizon')]),
# Todo: not used anywhere in the source code
'cached_service_check_horizon':
IntegerProp(default=0, class_inherit=[(Service, 'cached_check_horizon')]),
'use_large_installation_tweaks':
UnusedProp(text=u'this option is deprecated because in alignak it is just an alias '
u'for enable_environment_macros=False'),
'free_child_process_memory':
UnusedProp(text=u'this option is automatic in Python processes'),
'child_processes_fork_twice':
UnusedProp(text=u'fork twice is not used.'),
'enable_environment_macros':
BoolProp(default=False, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
# Flapping management
'enable_flap_detection':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'low_service_flap_threshold':
IntegerProp(default=20, fill_brok=[FULL_STATUS],
class_inherit=[(Service, 'global_low_flap_threshold')]),
'high_service_flap_threshold':
IntegerProp(default=30, fill_brok=[FULL_STATUS],
class_inherit=[(Service, 'global_high_flap_threshold')]),
'low_host_flap_threshold':
IntegerProp(default=20, fill_brok=[FULL_STATUS],
class_inherit=[(Host, 'global_low_flap_threshold')]),
'high_host_flap_threshold':
IntegerProp(default=30, fill_brok=[FULL_STATUS],
class_inherit=[(Host, 'global_high_flap_threshold')]),
'flap_history':
IntegerProp(default=20, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
# Todo: not used anywhere in the source code
'soft_state_dependencies':
BoolProp(managed=False, default=False),
# Check timeout
'service_check_timeout':
IntegerProp(default=60, class_inherit=[(Service, 'check_timeout')]),
'host_check_timeout':
IntegerProp(default=30, class_inherit=[(Host, 'check_timeout')]),
'timeout_exit_status':
IntegerProp(default=2),
'event_handler_timeout':
IntegerProp(default=30, class_inherit=[(Host, None), (Service, None)]),
'notification_timeout':
IntegerProp(default=30, class_inherit=[(Host, None), (Service, None)]),
# Performance data management
'perfdata_timeout':
IntegerProp(default=5, class_inherit=[(Host, None), (Service, None)]),
'process_performance_data':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'host_perfdata_command':
StringProp(default='', fill_brok=[FULL_STATUS],
brok_transformation=to_name_if_possible,
class_inherit=[(Host, 'perfdata_command')]),
'service_perfdata_command':
StringProp(default='', fill_brok=[FULL_STATUS],
brok_transformation=to_name_if_possible,
class_inherit=[(Service, 'perfdata_command')]),
# Inner perfdata self created module parameters
'host_perfdata_file':
StringProp(default='', fill_brok=[FULL_STATUS]),
'service_perfdata_file':
StringProp(default='', fill_brok=[FULL_STATUS]),
'host_perfdata_file_template':
StringProp(managed=False, default='/tmp/host.perf',
_help='Smartly replaced with the Alignak inner metrics feature or backend.'),
'service_perfdata_file_template':
StringProp(managed=False, default='/tmp/host.perf',
_help='Smartly replaced with the Alignak '
'inner metrics feature or backend.'),
'host_perfdata_file_mode':
CharProp(managed=False, default='a',
_help='Smartly replaced with the Alignak '
'inner metrics feature or backend.'),
'service_perfdata_file_mode':
CharProp(managed=False, default='a',
_help='Smartly replaced with the Alignak inner metrics feature or backend.'),
'host_perfdata_file_processing_interval':
IntegerProp(managed=False, default=15,
_help='Smartly replaced with the Alignak '
'inner metrics feature or backend.'),
'service_perfdata_file_processing_interval':
IntegerProp(managed=False, default=15,
_help='Smartly replaced with the Alignak '
'inner metrics feature or backend.'),
'host_perfdata_file_processing_command':
StringProp(managed=False, default=None,
_help='Smartly replaced with the Alignak inner metrics feature or backend.'),
'service_perfdata_file_processing_command':
StringProp(managed=False, default=None,
_help='Smartly replaced with the Alignak inner metrics feature or backend.'),
# Hosts/services orphanage check
'check_for_orphaned_services':
BoolProp(default=True, class_inherit=[(Service, 'check_for_orphaned')]),
'check_for_orphaned_hosts':
BoolProp(default=True, class_inherit=[(Host, 'check_for_orphaned')]),
# Freshness checks
'check_service_freshness':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Service, 'global_check_freshness')]),
'service_freshness_check_interval':
IntegerProp(default=60, fill_brok=[FULL_STATUS]),
'check_host_freshness':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, 'global_check_freshness')]),
'host_freshness_check_interval':
IntegerProp(default=60, fill_brok=[FULL_STATUS]),
'additional_freshness_latency':
IntegerProp(default=15, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'enable_embedded_perl':
BoolProp(managed=False,
default=True,
_help='It will surely never be managed, '
'but it should not be useful with poller performances.'),
'use_embedded_perl_implicitly':
BoolProp(managed=False, default=False),
# Todo: not used anywhere in the source code
'date_format':
StringProp(managed=False, default=None),
'use_timezone':
StringProp(default='', fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None), (Contact, None)]),
'illegal_object_name_chars':
StringProp(default="""`~!$%^&*"|'<>?,()=""", fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None),
(Contact, None), (HostExtInfo, None)]),
'illegal_macro_output_chars':
StringProp(default='', fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None), (Contact, None)]),
'env_variables_prefix':
StringProp(default='ALIGNAK_', fill_brok=[FULL_STATUS]),
'use_regexp_matching':
BoolProp(managed=False,
default=False,
_help='If you have some host or service definition like prod*, '
'it will surely fail from now, sorry.'),
'use_true_regexp_matching':
BoolProp(managed=False, default=None),
'admin_email':
UnusedProp(text=u'sorry, not yet implemented.'),
'admin_pager':
UnusedProp(text=u'sorry, not yet implemented.'),
'event_broker_options':
UnusedProp(text=u'event broker are replaced by modules '
u'with a real configuration template.'),
'broker_module':
UnusedProp(text=u'event broker are replaced by modules '
u'with a real configuration template.'),
'modified_attributes':
IntegerProp(default=0),
'daemon_thread_pool_size':
IntegerProp(default=8, fill_brok=[FULL_STATUS]),
'max_plugins_output_length':
IntegerProp(default=8192, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
'no_event_handlers_during_downtimes':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
# Interval between cleaning queues pass
'cleaning_queues_interval':
IntegerProp(default=900, fill_brok=[FULL_STATUS]),
# Now for problem/impact states changes
'enable_problem_impacts_states_change':
BoolProp(default=True, fill_brok=[FULL_STATUS],
class_inherit=[(Host, None), (Service, None)]),
# More of a running value indeed - the macros caught in the parsed configuration
'resource_macros_names':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'runners_timeout':
IntegerProp(default=3600),
# Self created daemons configuration
'launch_missing_daemons':
BoolProp(default=False, fill_brok=[FULL_STATUS]),
'daemons_arguments':
StringProp(default='', fill_brok=[FULL_STATUS]),
'daemons_log_folder':
StringProp(default='/usr/local/var/log/alignak',
fill_brok=[FULL_STATUS]),
'daemons_initial_port':
IntegerProp(default=10000,
fill_brok=[FULL_STATUS]),
# Kill launched daemons on communication failure
'daemons_failure_kill':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'daemons_check_period':
IntegerProp(default=5, fill_brok=[FULL_STATUS]),
'daemons_start_timeout':
IntegerProp(default=1, fill_brok=[FULL_STATUS]),
'daemons_new_conf_timeout':
IntegerProp(default=1, fill_brok=[FULL_STATUS]),
'daemons_dispatch_timeout':
IntegerProp(default=5, fill_brok=[FULL_STATUS]),
'daemons_stop_timeout':
IntegerProp(default=5, fill_brok=[FULL_STATUS]),
}
macros = {
'ALIGNAK': 'alignak_name',
'ALIGNAK_CONFIG': 'alignak_env',
'CONFIGFILES': 'config_files',
'MAINCONFIGFILE': 'main_config_file',
'MAINCONFIGDIR': 'config_base_dir',
'RETENTION_FILE': 'state_retention_file',
# The following one are Nagios specific features...
'STATUSDATAFILE': '',
'COMMENTDATAFILE': '',
'DOWNTIMEDATAFILE': '',
'RETENTIONDATAFILE': '',
'OBJECTCACHEFILE': '',
'TEMPFILE': '',
'TEMPPATH': '',
'LOGFILE': '',
'RESOURCEFILE': '',
'COMMANDFILE': '',
'HOSTPERFDATAFILE': '',
'SERVICEPERFDATAFILE': '',
'ADMINEMAIL': '',
'ADMINPAGER': ''
}
# To create dict of objects from the raw objects got from files or backend
# Dictionary: objects type: {
# Class of object,
# Class of objects list,
# 'name of the Config property for the objects',
# True to create an initial index,
# True if the property is clonable
# }
types_creations = {
'timeperiod':
(Timeperiod, Timeperiods, 'timeperiods', True, True),
'service':
(Service, Services, 'services', False, True),
'servicegroup':
(Servicegroup, Servicegroups, 'servicegroups', True, True),
'command':
(Command, Commands, 'commands', True, True),
'host':
(Host, Hosts, 'hosts', True, True),
'hostgroup':
(Hostgroup, Hostgroups, 'hostgroups', True, True),
'contact':
(Contact, Contacts, 'contacts', True, True),
'contactgroup':
(Contactgroup, Contactgroups, 'contactgroups', True, True),
'notificationway':
(NotificationWay, NotificationWays, 'notificationways', True, True),
'checkmodulation':
(CheckModulation, CheckModulations, 'checkmodulations', True, True),
'macromodulation':
(MacroModulation, MacroModulations, 'macromodulations', True, True),
'servicedependency':
(Servicedependency, Servicedependencies, 'servicedependencies', True, True),
'hostdependency':
(Hostdependency, Hostdependencies, 'hostdependencies', True, True),
'arbiter':
(ArbiterLink, ArbiterLinks, 'arbiters', True, False),
'scheduler':
(SchedulerLink, SchedulerLinks, 'schedulers', True, False),
'reactionner':
(ReactionnerLink, ReactionnerLinks, 'reactionners', True, False),
'broker':
(BrokerLink, BrokerLinks, 'brokers', True, False),
'receiver':
(ReceiverLink, ReceiverLinks, 'receivers', True, False),
'poller':
(PollerLink, PollerLinks, 'pollers', True, False),
'realm':
(Realm, Realms, 'realms', True, True),
'module':
(Module, Modules, 'modules', True, False),
'resultmodulation':
(Resultmodulation, Resultmodulations, 'resultmodulations', True, True),
'businessimpactmodulation':
(Businessimpactmodulation, Businessimpactmodulations, 'businessimpactmodulations',
True, True),
'escalation':
(Escalation, Escalations, 'escalations', True, True),
'serviceescalation':
(Serviceescalation, Serviceescalations, 'serviceescalations', False, False),
'hostescalation':
(Hostescalation, Hostescalations, 'hostescalations', False, False),
'hostextinfo':
(HostExtInfo, HostsExtInfo, 'hostsextinfo', True, False),
'serviceextinfo':
(ServiceExtInfo, ServicesExtInfo, 'servicesextinfo', True, False),
}
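    # Illustrative reading of one entry (assumed usage): for 'host',
    # (cls, clss, prop, initial_index, _) = (Host, Hosts, 'hosts', True, True)
    # means that create_objects_for_type() builds Host objects from the raw
    # 'host' definitions, wraps them in a Hosts list stored as self.hosts,
    # and creates an initial name index for them.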
    # This table is used to transform old parameter names into new ones,
    # e.g. from the Nagios2 format to the Nagios3 one
old_properties = {
'nagios_user': 'alignak_user',
'nagios_group': 'alignak_group'
}
read_config_silent = False
early_created_types = ['arbiter', 'module']
configuration_types = ['void', 'timeperiod', 'command',
'realm',
'host', 'hostgroup', 'hostdependency', 'hostextinfo',
'service', 'servicegroup', 'servicedependency', 'serviceextinfo',
'contact', 'contactgroup',
'notificationway', 'escalation', 'serviceescalation', 'hostescalation',
'checkmodulation', 'macromodulation', 'resultmodulation',
'businessimpactmodulation',
'arbiter', 'scheduler', 'reactionner', 'broker', 'receiver', 'poller',
'module']
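    # Illustrative legacy snippet matched against the types above: a block like
    #   define host {
    #       host_name  srv01
    #       use        generic-host
    #   }
    # is parsed by read_config_buf() into an objects['host'] entry.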
def __init__(self, params=None, parsing=True):
if params is None:
params = {}
if parsing:
# Create a new configuration identifier
self.instance_id = u'%s_%d' % (self.__class__.__name__, self.__class__._next_id)
self.__class__._next_id += 1
# Compute the "USER" properties and macros
for i in range(1, 65):
if '$USER%d$' % i in self.__class__.properties:
continue
self.__class__.macros['USER%d' % i] = '$USER%s$' % i
self.__class__.properties['$USER%d$' % i] = StringProp(default='')
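            # Illustrative effect of the loop above: '$USER1$' .. '$USER64$'
            # now exist as StringProp properties and 'USER1' .. 'USER64' as
            # macros, so resource definitions like $USER1$=/var/lib/plugins
            # may be stored and resolved later.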
elif 'instance_id' not in params:
logger.error("When not parsing a configuration, an instance_id "
"must exist in the provided parameters for a configuration!")
else:
self.instance_id = params['instance_id']
            # Unserialize the objects lists
for _, _, strclss, _, _ in list(self.types_creations.values()):
if strclss in ['arbiters', 'schedulers', 'brokers',
'pollers', 'reactionners', 'receivers']:
continue
if strclss not in params:
continue
setattr(self, strclss, unserialize(params[strclss]))
del params[strclss]
for prop in ['host_perfdata_command', 'service_perfdata_command',
'global_host_event_handler', 'global_service_event_handler']:
if prop not in params or params[prop] is None:
continue
setattr(self, prop, unserialize(params[prop]))
del params[prop]
super(Config, self).__init__(params, parsing=parsing)
self.fill_default()
# self.params = {}
self.resource_macros_names = []
# The configuration files I read
self.my_cfg_files = []
# By default the conf is correct and the warnings and errors lists are empty
self.conf_is_correct = True
self.configuration_warnings = []
self.configuration_errors = []
# We tag the conf with a magic_hash, a random value to
# identify this conf
random.seed(time.time())
self.magic_hash = random.randint(1, 100000)
# Store daemons detected as missing during the configuration check
self.missing_daemons = []
def __repr__(self): # pragma: no cover
return '<%s %s - %s />' % (self.__class__.__name__, self.instance_id,
getattr(self, 'config_name', 'unknown'))
__str__ = __repr__
def serialize(self, no_json=True, printing=False):
logger.debug("Serializing the configuration: %s", self)
res = super(Config, self).serialize(no_json=no_json, printing=printing)
# The following are not in properties so not in the dict
# todo: may be using an 'objects' dictionary for all the objects?
for _, _, strclss, _, _ in list(self.types_creations.values()):
if strclss in ['arbiters', 'schedulers', 'brokers',
'pollers', 'reactionners', 'receivers']:
continue
if getattr(self, strclss, None) is None:
logger.debug("- no %s", strclss)
continue
items = getattr(self, strclss)
logger.debug("- %d %s", len(items), strclss)
res[strclss] = serialize(items, no_json=no_json, printing=printing)
# # Some special properties
        # todo: commented out because this is still managed by the serialize function!
# for prop in ['host_perfdata_command', 'service_perfdata_command',
# 'host_perfdata_file_processing_command',
# 'service_perfdata_file_processing_command',
# 'global_host_event_handler', 'global_service_event_handler']:
# # res[prop] = None
# # if getattr(self, prop, None) not in [None, '', 'None']:
# # res[prop] = serialize(getattr(self, prop))
# res[prop] = serialize(getattr(self, prop, None),
# no_json=no_json, printing=printing)
res['macros'] = self.macros
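        # Illustrative result shape (assumed): a plain dict with the serialized
        # properties plus one key per monitored objects list, e.g. res['hosts'],
        # res['services'], ..., and res['macros'].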
return res
def clean_params(self, params):
"""Convert a list of parameters (key=value) into a dict
        This function is used to transform Nagios (or ini) formatted parameters (key=value)
to a dictionary.
:param params: parameters list
:type params: list
:return: dict with key and value. Log error if malformed
:rtype: dict
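
        Example (illustrative)::

            config.clean_params(['interval_length=60', 'log_level'])
            # -> {'interval_length': '60'}; an error is recorded for
            #    'log_level' because it has no '=' sign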
"""
clean_p = {}
for elt in params:
elts = elt.split('=', 1)
if len(elts) == 1: # error, there is no = !
self.add_error("the parameter %s is malformed! (no = sign)" % elts[0])
else:
# if elts[1] == '':
# self.add_warning("the parameter %s is ambiguous! "
# "No value after =, assuming an empty string" % elts[0])
clean_p[elts[0]] = elts[1]
return clean_p
def load_params(self, params):
"""Load parameters from main configuration file
:param params: parameters list (converted right at the beginning)
:type params:
:return: None
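
        Example (illustrative)::

            # 'interval_length' is a known property: its value gets pythonized;
            # '$PLUGINSDIR$' is not: it is stored as a new macro / StringProp
            config.load_params(['interval_length=60',
                                '$PLUGINSDIR$=/var/lib/plugins'])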
"""
logger.debug("Alignak parameters:")
for key, value in sorted(self.clean_params(params).items()):
update_attribute = None
            # Maybe it's a variable like $USER$ or $ANOTHERVARIABLE$,
            # so look at the first character: if it's a $ and the key
            # also ends with a $, it is a macro variable
if key[0] == '$' and key[-1] == '$':
key = key[1:-1]
# Update the macros list
if key not in self.__class__.macros:
logger.debug("New macro %s: %s - %s", self, key, value)
self.__class__.macros[key] = '$%s$' % key
key = '$%s$' % key
logger.debug("- macro %s", key)
update_attribute = value
# Create a new property to store the macro value
if isinstance(value, list):
self.__class__.properties[key] = ListProp(default=value)
else:
self.__class__.properties[key] = StringProp(default=value)
elif key in self.properties:
update_attribute = self.properties[key].pythonize(value)
elif key in self.running_properties:
logger.warning("using a the running property %s in a config file", key)
update_attribute = self.running_properties[key].pythonize(value)
elif key.startswith('$') or key in ['cfg_file', 'cfg_dir']:
                # it's a macro or a now-useless parameter, we don't touch it
update_attribute = value
else:
logger.debug("Guessing the property '%s' type because it "
"is not in %s object properties", key, self.__class__.__name__)
update_attribute = ToGuessProp().pythonize(value)
if update_attribute is not None:
setattr(self, key, update_attribute)
logger.debug("- update %s = %s", key, update_attribute)
# Change Nagios2 names to Nagios3 ones (before using them)
self.old_properties_names_to_new()
# Fill default for myself - new properties entry becomes a self attribute
self.fill_default()
@staticmethod
def _cut_line(line):
"""Split the line on whitespaces and remove empty chunks
:param line: the line to split
:type line: str
:return: list of strings
:rtype: list
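
        Example (illustrative)::

            Config._cut_line('host_name   localhost')
            # -> ['host_name', 'localhost']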
"""
# punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
if re.search("([\t\n\r]+|[\x0b\x0c ]{3,})+", line):
tmp = re.split("([\t\n\r]+|[\x0b\x0c ]{3,})+", line, 1)
else:
tmp = re.split("[" + string.whitespace + "]+", line, 1)
res = [elt.strip() for elt in tmp if elt.strip() != '']
return res
def read_legacy_cfg_files(self, cfg_files, alignak_env_files=None):
# pylint: disable=too-many-nested-blocks,too-many-statements
# pylint: disable=too-many-branches, too-many-locals
"""Read and parse the Nagios legacy configuration files
        and store their content into a StringIO object whose content
        is returned as the function result
:param cfg_files: list of file to read
:type cfg_files: list
        :param alignak_env_files: name(s) of the alignak environment file(s)
:type alignak_env_files: list
:return: a buffer containing all files
:rtype: str
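
        The returned buffer contains marker lines used later to keep track of
        the source file of each definition, e.g. (illustrative)::

            # imported_from=/etc/alignak/hosts.cfg
            define host {
                ...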
"""
cfg_buffer = ''
if not cfg_files:
return cfg_buffer
# Update configuration with the first legacy configuration file name and path
# This will update macro properties
self.alignak_env = 'n/a'
if alignak_env_files is not None:
self.alignak_env = alignak_env_files
if not isinstance(alignak_env_files, list):
self.alignak_env = [os.path.abspath(alignak_env_files)]
else:
self.alignak_env = [os.path.abspath(f) for f in alignak_env_files]
self.main_config_file = os.path.abspath(cfg_files[0])
self.config_base_dir = os.path.dirname(self.main_config_file)
# Universal newline mode (all new lines are managed internally)
res = StringIO(u"# Configuration cfg_files buffer", newline=None)
if not self.read_config_silent and cfg_files:
logger.info("Reading the configuration cfg_files...")
# A first pass to get all the configuration cfg_files in a buffer
for cfg_file in cfg_files:
# Make sure the configuration cfg_files are not repeated...
if os.path.abspath(cfg_file) in self.my_cfg_files:
logger.warning("- ignoring repeated file: %s", os.path.abspath(cfg_file))
continue
self.my_cfg_files.append(os.path.abspath(cfg_file))
# File header
res.write(u"\n")
res.write(u"# imported_from=%s" % cfg_file)
res.write(u"\n")
if not self.read_config_silent:
logger.info("- opening '%s' configuration file", cfg_file)
try:
# Open in Universal way for Windows, Mac, Linux-based systems
file_d = open(cfg_file, 'r')
buf = file_d.readlines()
file_d.close()
except IOError as exp:
self.add_error("cannot open main file '%s' for reading: %s" % (cfg_file, exp))
continue
for line in buf:
try:
line = line.decode('utf8', 'replace')
except AttributeError:
                    # Python 3 raises an AttributeError because the line is already a str
pass
line = line.strip()
res.write(line)
res.write(u"\n")
if (re.search("^cfg_file", line) or re.search("^resource_file", line)) \
and '=' in line:
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
cfg_file_name = elts[1]
else:
cfg_file_name = os.path.join(self.config_base_dir, elts[1])
cfg_file_name = cfg_file_name.strip()
cfg_file_name = os.path.abspath(cfg_file_name)
# Make sure the configuration cfg_files are not repeated...
if cfg_file_name in self.my_cfg_files:
logger.warning("- ignoring repeated file: %s", cfg_file_name)
else:
self.my_cfg_files.append(cfg_file_name)
if not self.read_config_silent:
logger.info(" reading: %s", cfg_file_name)
try:
# Read the file content to the buffer
file_d = open(cfg_file_name, 'r')
# File header
res.write(u"\n")
res.write(u"# imported_from=%s" % cfg_file_name)
res.write(u"\n")
content = file_d.read()
try:
content = content.decode('utf8', 'replace')
except AttributeError:
# Python 3 will raise an exception
pass
res.write(content)
res.write(u"\n")
file_d.close()
except IOError as exp:
self.add_error(u"cannot open file '%s' for reading: %s"
% (cfg_file_name, exp))
elif re.search("^cfg_dir", line) and '=' in line:
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
cfg_dir_name = elts[1]
else:
cfg_dir_name = os.path.join(self.config_base_dir, elts[1])
# Ok, look if it's really a directory
if not os.path.isdir(cfg_dir_name):
self.add_error(u"cannot open directory '%s' for reading" % cfg_dir_name)
continue
                # Now walk through it.
for root, _, walk_files in os.walk(cfg_dir_name, followlinks=True):
for found_file in walk_files:
if not re.search(r"\.cfg$", found_file):
continue
cfg_file_name = os.path.join(root, found_file)
# Make sure the configuration cfg_files are not repeated...
if os.path.abspath(cfg_file_name) in self.my_cfg_files:
logger.warning("- ignoring repeated file: %s", cfg_file_name)
else:
self.my_cfg_files.append(cfg_file_name)
if not self.read_config_silent:
logger.info(" reading: %s", cfg_file_name)
try:
# Read the file content to the buffer
file_d = open(cfg_file_name, 'r')
# File header
res.write(u"\n")
res.write(u"# imported_from=%s" % cfg_file_name)
res.write(u"\n")
content = file_d.read()
try:
content = content.decode('utf8', 'replace')
except AttributeError:
# Python 3 will raise an exception
pass
res.write(content)
res.write(u"\n")
file_d.close()
except IOError as exp:
self.add_error(u"cannot open file '%s' for reading: %s"
% (cfg_file_name, exp))
cfg_buffer = res.getvalue()
res.close()
return cfg_buffer
def read_config_buf(self, cfg_buffer):
# pylint: disable=too-many-locals, too-many-branches
"""The legacy configuration buffer (previously returned by Config.read_config())
If the buffer is empty, it will return an empty dictionary else it will return a
dictionary containing dictionary items tha tmay be used to create Alignak
objects
:param cfg_buffer: buffer containing all data from config files
:type cfg_buffer: str
:return: dict of alignak objects with the following structure ::
{ type1 : [{key: value, ..}, {..}],
type2 : [ ... ]
}
Example ::
{ 'host' : [{'host_name': 'myhostname', ..}, {..}],
'service' : [ ... ]
}
Values are all str for now. It is pythonized at object creation
:rtype: dict
"""
objects = {}
if not self.read_config_silent:
if cfg_buffer:
logger.info("Parsing the legacy configuration files...")
else:
logger.info("No legacy configuration files.")
return objects
params = []
objectscfg = {}
for o_type in self.__class__.configuration_types:
objectscfg[o_type] = []
tmp = []
tmp_type = 'void'
in_define = False
almost_in_define = False
continuation_line = False
tmp_line = ''
lines = cfg_buffer.split('\n')
# Keep the line number for the file path
line_nb = 0
file_from = ''
for line in lines:
if line.startswith("# imported_from="):
file_from = line.split('=')[1]
line_nb = 0 # reset the line number too
if not self.read_config_silent:
logger.debug("#####\n# file: %s", file_from)
continue
if not self.read_config_silent:
logger.debug("- %d: %s", line_nb, line)
line_nb += 1
# Remove comments
line = split_semicolon(line)[0].strip()
            # A backslash means there is more to come
if re.search(r"\\\s*$", line) is not None:
continuation_line = True
line = re.sub(r"\\\s*$", "", line)
line = re.sub(r"^\s+", " ", line)
tmp_line += line
continue
elif continuation_line:
# Now the continuation line is complete
line = re.sub(r"^\s+", "", line)
line = tmp_line + line
tmp_line = ''
continuation_line = False
# } alone in a line means stop the object reading
if re.search(r"^\s*}\s*$", line) is not None:
in_define = False
# { alone in a line can mean start object reading
if re.search(r"^\s*\{\s*$", line) is not None and almost_in_define:
almost_in_define = False
in_define = True
continue
if re.search(r"^\s*#|^\s*$|^\s*}", line) is not None:
pass
            # A define must be caught and its type saved
            # The previous entry must be saved first
elif re.search("^define", line) is not None:
if re.search(r".*\{.*$", line) is not None: # pylint: disable=R0102
in_define = True
else:
almost_in_define = True
# Save previous object definition
if tmp_type not in objectscfg:
objectscfg[tmp_type] = []
objectscfg[tmp_type].append(tmp)
# Start a new object definition
tmp = []
imported_from = u"imported_from %s:%s" % (file_from, line_nb)
tmp.append(imported_from)
# Get new type
elts = re.split(r'\s', line)
# Maybe there was space before and after the type
# so we must get all and strip it
tmp_type = ' '.join(elts[1:]).strip()
tmp_type = tmp_type.split('{')[0].strip()
else:
if in_define:
tmp.append(line)
else:
params.append(line)
# Maybe the type of the last element is unknown, declare it
if tmp_type not in objectscfg:
objectscfg[tmp_type] = []
objectscfg[tmp_type].append(tmp)
# Check and load the parameters
self.load_params(params)
for o_type in objectscfg:
objects[o_type] = []
for items in objectscfg[o_type]:
tmp_obj = {}
for line in items:
elts = self._cut_line(line)
if not elts:
continue
prop = elts[0]
if prop not in tmp_obj:
tmp_obj[prop] = []
value = ' '.join(elts[1:])
tmp_obj[prop].append(value)
if tmp_obj != {}:
# Create a new object
objects[o_type].append(tmp_obj)
return objects
@staticmethod
def add_self_defined_objects(raw_objects):
"""Add self defined command objects for internal processing ;
bp_rule, _internal_host_up, _echo, _internal_host_check, _interna_service_check
:param raw_objects: Raw config objects dict
:type raw_objects: dict
:return: raw_objects with some more commands
:rtype: dict
"""
logger.info("- creating internally defined commands...")
if 'command' not in raw_objects:
raw_objects['command'] = []
# Business rule
raw_objects['command'].append({
'command_name': 'bp_rule',
'command_line': 'bp_rule'
})
# Internal host checks
raw_objects['command'].append({
'command_name': '_internal_host_up',
'command_line': '_internal_host_up'
})
raw_objects['command'].append({
'command_name': '_internal_host_check',
# Command line must contain: state_id;output
'command_line': '_internal_host_check;$ARG1$;$ARG2$'
})
# Internal service check
raw_objects['command'].append({
'command_name': '_echo',
'command_line': '_echo'
})
raw_objects['command'].append({
'command_name': '_internal_service_check',
# Command line must contain: state_id;output
'command_line': '_internal_service_check;$ARG1$;$ARG2$'
})
def early_create_objects(self, raw_objects):
"""Create the objects needed for the post configuration file initialization
:param raw_objects: dict with all object with str values
:type raw_objects: dict
:return: None
"""
types_creations = self.__class__.types_creations
early_created_types = self.__class__.early_created_types
logger.info("Creating objects...")
for o_type in sorted(types_creations):
if o_type in early_created_types:
self.create_objects_for_type(raw_objects, o_type)
logger.info("Done")
def create_objects(self, raw_objects):
"""Create all the objects got after the post configuration file initialization
:param raw_objects: dict with all object with str values
:type raw_objects: dict
:return: None
"""
types_creations = self.__class__.types_creations
early_created_types = self.__class__.early_created_types
logger.info("Creating objects...")
# Before really creating the objects, we add some ghost
# ones like the bp_rule for correlation
self.add_self_defined_objects(raw_objects)
for o_type in sorted(types_creations):
if o_type not in early_created_types:
self.create_objects_for_type(raw_objects, o_type)
logger.info("Done")
def create_objects_for_type(self, raw_objects, o_type):
"""Generic function to create objects regarding the o_type
        This function creates real Alignak objects from the raw data read from the configuration.
:param raw_objects: Raw objects
:type raw_objects: dict
:param o_type: the object type we want to create
:type o_type: object
:return: None
"""
        # E.g. for the timeperiods, this code does the equivalent of:
# timeperiods = []
# for timeperiodcfg in objects['timeperiod']:
# t = Timeperiod(timeperiodcfg)
# timeperiods.append(t)
# self.timeperiods = Timeperiods(timeperiods)
types_creations = self.__class__.types_creations
(cls, clss, prop, initial_index, _) = types_creations[o_type]
# List to store the created objects
lst = []
try:
logger.info("- creating '%s' objects", o_type)
for obj_cfg in raw_objects[o_type]:
# We create the object
my_object = cls(obj_cfg)
# and append it to the list
lst.append(my_object)
if not lst:
logger.info(" none.")
except KeyError:
logger.info(" no %s objects in the configuration", o_type)
# Create the objects list and set it in our properties
setattr(self, prop, clss(lst, initial_index))
def early_arbiter_linking(self, arbiter_name, params):
""" Prepare the arbiter for early operations
        :param arbiter_name: default arbiter name if no arbiter exists in the configuration
        :type arbiter_name: str
        :param params: additional parameters for the self-created arbiter
        :type params: dict
        :return: None
"""
if not self.arbiters:
params.update({
'name': arbiter_name, 'arbiter_name': arbiter_name,
'host_name': socket.gethostname(),
'address': '127.0.0.1', 'port': 7770,
'spare': '0'
})
logger.warning("There is no arbiter, I add myself (%s) reachable on %s:%d",
arbiter_name, params['address'], params['port'])
arb = ArbiterLink(params, parsing=True)
self.arbiters = ArbiterLinks([arb])
# First fill default
self.arbiters.fill_default()
self.modules.fill_default()
self.arbiters.linkify(modules=self.modules)
self.modules.linkify()
def linkify_one_command_with_commands(self, commands, prop):
"""
        Link a command call (executable) with a configured command
:param commands: object commands
:type commands: alignak.objects.command.Commands
:param prop: property name
:type prop: str
:return: None
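
        Example (illustrative)::

            # replace the raw 'host_perfdata_command' string with a
            # CommandCall bound to the matching Command object
            config.linkify_one_command_with_commands(config.commands,
                                                     'host_perfdata_command')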
"""
        command = (getattr(self, prop, None) or '').strip()
if not command:
setattr(self, prop, None)
return
data = {
"command_line": command,
"commands": commands
}
if hasattr(self, 'poller_tag'):
data.update({"poller_tag": self.poller_tag})
if hasattr(self, 'reactionner_tag'):
data.update({"reactionner_tag": self.reactionner_tag})
setattr(self, prop, CommandCall(data, parsing=True))
def linkify(self):
""" Make 'links' between elements, like a host got a services list
with all its services in it
:return: None
"""
self.services.optimize_service_search(self.hosts)
# First linkify myself like for some global commands
self.linkify_one_command_with_commands(self.commands, 'host_perfdata_command')
self.linkify_one_command_with_commands(self.commands, 'service_perfdata_command')
self.linkify_one_command_with_commands(self.commands, 'global_host_event_handler')
self.linkify_one_command_with_commands(self.commands, 'global_service_event_handler')
# link hosts with timeperiods and commands
self.hosts.linkify(self.timeperiods, self.commands,
self.contacts, self.realms,
self.resultmodulations, self.businessimpactmodulations,
self.escalations, self.hostgroups,
self.checkmodulations, self.macromodulations)
self.hostsextinfo.merge(self.hosts)
        # Do the simplification AFTER exploding the groups
# link hostgroups with hosts
self.hostgroups.linkify(self.hosts, self.realms, self.forced_realms_hostgroups)
# link services with other objects
self.services.linkify(self.hosts, self.commands,
self.timeperiods, self.contacts,
self.resultmodulations, self.businessimpactmodulations,
self.escalations, self.servicegroups,
self.checkmodulations, self.macromodulations)
self.servicesextinfo.merge(self.services)
# link servicegroups members with services
self.servicegroups.linkify(self.hosts, self.services)
# link notificationways with timeperiods and commands
self.notificationways.linkify(self.timeperiods, self.commands)
# link notificationways with timeperiods and commands
self.checkmodulations.linkify(self.timeperiods, self.commands)
# Link with timeperiods
self.macromodulations.linkify(self.timeperiods)
        # link contactgroups with contacts
self.contactgroups.linkify(self.contacts)
# link contacts with timeperiods and commands
self.contacts.linkify(self.commands, self.notificationways)
# link timeperiods with timeperiods (exclude part)
self.timeperiods.linkify()
self.servicedependencies.linkify(self.hosts, self.services,
self.timeperiods)
self.hostdependencies.linkify(self.hosts, self.timeperiods)
self.resultmodulations.linkify(self.timeperiods)
self.businessimpactmodulations.linkify(self.timeperiods)
self.escalations.linkify(self.timeperiods, self.contacts,
self.services, self.hosts)
# Link all satellite links with modules
self.schedulers.linkify(self.modules)
self.brokers.linkify(self.modules)
self.receivers.linkify(self.modules)
self.reactionners.linkify(self.modules)
self.pollers.linkify(self.modules)
# Ok, now update all realms with back links of satellites
satellites = {}
for sat in self.schedulers:
satellites[sat.uuid] = sat
for sat in self.pollers:
satellites[sat.uuid] = sat
for sat in self.reactionners:
satellites[sat.uuid] = sat
for sat in self.receivers:
satellites[sat.uuid] = sat
for sat in self.brokers:
satellites[sat.uuid] = sat
self.realms.prepare_satellites(satellites)
def clean(self):
"""Wrapper for calling the clean method of all the configuration objects
:return: None
"""
logger.info("Cleaning configuration objects before configuration sending:")
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
logger.debug(" . for %s", inner_property, )
getattr(self, inner_property).clean()
def warn_about_unmanaged_parameters(self):
"""used to raise warning if the user got parameter
that we do not manage from now
:return: None
"""
properties = self.__class__.properties
unmanaged = []
for prop, entry in list(properties.items()):
if not entry.managed and hasattr(self, prop):
if entry.help:
line = "%s: %s" % (prop, entry.help)
else:
line = prop
unmanaged.append(line)
if unmanaged:
logger.warning("The following Nagios legacy parameter(s) are not currently "
"managed by Alignak:")
for line in unmanaged:
logger.warning('- %s', line)
logger.warning("Those are unmanaged configuration statements, do you really need it? "
"Create an issue on the Alignak repository or submit a pull "
"request: http://www.github.com/Alignak-monitoring/alignak")
def override_properties(self):
"""Wrapper for calling override_properties method of services attribute
:return:
"""
self.services.override_properties(self.hosts)
def explode(self):
"""Use to fill groups values on hosts and create new services
(for host group ones)
:return: None
"""
# first elements, after groups
self.contacts.explode(self.contactgroups, self.notificationways)
self.contactgroups.explode()
self.hosts.explode(self.hostgroups, self.contactgroups)
self.hostgroups.explode()
self.services.explode(self.hosts, self.hostgroups, self.contactgroups,
self.servicegroups, self.servicedependencies)
self.servicegroups.explode()
self.timeperiods.explode()
self.hostdependencies.explode(self.hostgroups)
self.servicedependencies.explode(self.hostgroups)
# Serviceescalations hostescalations will create new escalations
self.serviceescalations.explode(self.escalations)
self.hostescalations.explode(self.escalations)
self.escalations.explode(self.hosts, self.hostgroups, self.contactgroups)
# Now the architecture part
self.realms.explode()
def apply_dependencies(self):
"""Creates dependencies links between elements.
:return: None
"""
self.hosts.apply_dependencies()
self.services.apply_dependencies(self.hosts)
# # For debugging purpose if needed...
# logger.debug("Dependencies:")
# for host in self.hosts:
# logger.debug("host: %s", host)
# for uuid in host.parent_dependencies:
# logger.debug(" <- %s",
# self.hosts[uuid] if uuid in self.hosts else self.services[uuid])
# for uuid in host.child_dependencies:
# logger.debug(" -> %s",
# self.hosts[uuid] if uuid in self.hosts else self.services[uuid])
#
# for host in self.hostdependencies:
# logger.debug("hd: %s", host)
#
# for svc in self.services:
# logger.debug("service: %s", svc)
# for uuid in svc.parent_dependencies:
# logger.debug(" <- %s",
# self.hosts[uuid] if uuid in self.hosts else self.services[uuid])
# for uuid in svc.child_dependencies:
# logger.debug(" -> %s",
# self.hosts[uuid] if uuid in self.hosts else self.services[uuid])
#
# for svc in self.servicedependencies:
# logger.debug("sd: %s", svc)
def apply_inheritance(self):
"""Apply inheritance from the templates
Templates can be used in the following objects:
* hosts
* contacts
* services
* services dependencies
* hosts dependencies
* timeperiods
* hosts extinfo
* services extinfo
* service escalations
* host escalations
* escalations
:return: None
"""
logger.debug("Applying inheritance:")
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
# Not yet for the realms and daemons links
if inner_property in ['realms', 'arbiters', 'schedulers', 'reactionners',
'pollers', 'brokers', 'receivers',
'modules']:
continue
logger.debug(" . for %s", inner_property,)
inner_object = getattr(self, inner_property, None)
if inner_object is None:
logger.debug("No %s to fill with default values", inner_property)
continue
inner_object.apply_inheritance()
def apply_implicit_inheritance(self):
"""Wrapper for calling apply_implicit_inheritance method of services attributes
Implicit inheritance is between host and service (like notification parameters etc)
:return:None
"""
self.services.apply_implicit_inheritance(self.hosts)
def fill_default_configuration(self):
"""Fill objects properties with default value if necessary
:return: None
"""
logger.debug("Filling the unset properties with their default value:")
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
# Not yet for the realms and daemons links
if inner_property in ['realms', 'arbiters', 'schedulers', 'reactionners',
'pollers', 'brokers', 'receivers']:
continue
logger.debug(" . for %s", inner_property,)
inner_object = getattr(self, inner_property, None)
if inner_object is None:
logger.debug("No %s to fill with default values", inner_property)
continue
inner_object.fill_default()
# We have all monitored elements, we can create a default realm if none is defined
if not getattr(self, 'realms', None):
self.fill_default_realm()
self.realms.fill_default()
# Then we create missing satellites, so no other satellites will be created after
# We also define the default realm
self.fill_default_satellites(self.launch_missing_daemons)
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
# Only for the daemons links
if inner_property in ['schedulers', 'reactionners', 'pollers', 'brokers', 'receivers']:
logger.debug(" . for %s", inner_property,)
inner_object = getattr(self, inner_property)
inner_object.fill_default()
# Now fill some fields we can predict (like address for hosts)
self.hosts.fill_predictive_missing_parameters()
self.services.fill_predictive_missing_parameters()
def fill_default_realm(self):
"""Check if a realm is defined, if not
Create a new one (default) and tag everyone that do not have
a realm prop to be put in this realm
:return: None
"""
if not getattr(self, 'realms', None):
# Create a default realm so all hosts without realm will be linked with it
default = Realm({
'realm_name': u'All', 'alias': u'Self created default realm', 'default': '1'
})
self.realms = Realms([default])
logger.warning("No realms defined, I am adding one as %s", default.get_name())
# Check that a default realm (and only one) is defined and get this default realm
self.realms.fill_default()
def log_daemons_list(self):
"""Log Alignak daemons list
:return:
"""
daemons = [self.arbiters, self.schedulers, self.pollers,
self.brokers, self.reactionners, self.receivers]
for daemons_list in daemons:
if not daemons_list:
logger.debug("- %ss: None", daemons_list.inner_class.my_type)
else:
logger.debug("- %ss: %s", daemons_list.inner_class.my_type,
','.join([daemon.get_name() for daemon in daemons_list]))
def fill_default_satellites(self, alignak_launched=False):
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
"""If a required satellite is missing in the configuration, we create a new satellite
on localhost with some default values
:param alignak_launched: created daemons are to be launched or not
:type alignak_launched: bool
:return: None
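
        Illustrative outcome: with no daemon defined in the configuration, a
        Default-Scheduler, Default-Reactionner, Default-Poller, Default-Broker
        and Default-Receiver are created on 127.0.0.1, using consecutive ports
        starting at daemons_initial_port.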
"""
# Log all satellites list
logger.debug("Alignak configured daemons list:")
self.log_daemons_list()
# We must create relations between the realms first. This is necessary to have
# an accurate map of the situation!
self.realms.linkify()
self.realms.get_default(check=True)
# Get list of known realms
# realms_names = [realm.get_name() for realm in self.realms]
# Create one instance of each satellite type if it does not exist...
if not self.schedulers:
logger.warning("No scheduler defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = SchedulerLink({'type': 'scheduler', 'name': 'Default-Scheduler',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.schedulers = SchedulerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.reactionners:
logger.warning("No reactionner defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = ReactionnerLink({'type': 'reactionner', 'name': 'Default-Reactionner',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.reactionners = ReactionnerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.pollers:
logger.warning("No poller defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = PollerLink({'type': 'poller', 'name': 'Default-Poller',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.pollers = PollerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.brokers:
logger.warning("No broker defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = BrokerLink({'type': 'broker', 'name': 'Default-Broker',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.brokers = BrokerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.receivers:
logger.warning("No receiver defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = ReceiverLink({'type': 'receiver', 'name': 'Default-Receiver',
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.receivers = ReceiverLinks([satellite])
self.missing_daemons.append(satellite)
# Assign default realm to the satellites that do not have a defined realm
for satellites_list in [self.pollers, self.brokers, self.reactionners,
self.receivers, self.schedulers]:
for satellite in satellites_list:
# Here the 'realm' property is not yet a real realm object uuid ...
# but still a realm name! Make it a realm uuid
if not getattr(satellite, 'realm', None):
satellite.realm = self.realms.default.get_name()
sat_realm = self.realms.find_by_name(satellite.realm)
if not sat_realm:
self.add_error("The %s '%s' is affected to an unknown realm: '%s'"
% (satellite.type, satellite.name, satellite.realm))
continue
# satellite.realm_name = sat_realm.get_name()
logger.info("Tagging satellite '%s' with realm %s", satellite.name, satellite.realm)
satellite.realm = sat_realm.uuid
satellite.realm_name = sat_realm.get_name()
# Alert for spare daemons
if getattr(satellite, 'spare', False):
self.add_warning("The %s '%s' is declared as a spare daemon. "
"Spare mode is not yet implemented and it will be ignored."
% (satellite.type, satellite.name))
continue
# Alert for non active daemons
if not getattr(satellite, 'active', False):
self.add_warning("The %s '%s' is declared as a non active daemon. "
"It will be ignored."
% (satellite.type, satellite.name))
continue
# And tell the realm that it knows the satellite
realm_satellites = getattr(sat_realm, '%ss' % satellite.type)
if satellite.uuid not in realm_satellites:
realm_satellites.append(satellite.uuid)
# If the satellite manages sub realms...
# We update the "potential_" satellites that may be used for this realm
if satellite.manage_sub_realms:
for realm_uuid in sat_realm.all_sub_members:
logger.debug("Linkify %s '%s' with realm %s",
satellite.type, satellite.name,
self.realms[realm_uuid].get_name())
realm_satellites = getattr(self.realms[realm_uuid],
'potential_%ss' % satellite.type)
if satellite.uuid not in realm_satellites:
realm_satellites.append(satellite.uuid)
# Parse hosts for realms and set host in the default realm if no realm is set
hosts_realms_names = set()
logger.debug("Hosts realm configuration:")
for host in self.hosts:
if not getattr(host, 'realm', None):
                # todo: perhaps check the hostgroups realm (if any) to set a hostgroup realm
# rather than the default realm
logger.debug("Host: %s, realm: %s, hostgroups: %s",
host.get_name(), host.realm, host.hostgroups)
host.realm = self.realms.default.get_name()
host.got_default_realm = True
host_realm = self.realms.find_by_name(host.realm)
if not host_realm:
self.add_error("The host '%s' is affected to an unknown realm: '%s'"
% (host.get_name(), host.realm))
continue
host.realm_name = host_realm.get_name()
host_realm.add_members(host.get_name())
logger.debug("- tagging host '%s' with realm %s", host.get_name(), host.realm_name)
hosts_realms_names.add(host.realm_name)
logger.debug(" - %s: realm %s, active %s, passive %s",
host.get_name(), host_realm.get_name(),
host.active_checks_enabled, host.passive_checks_enabled)
host_realm.passively_checked_hosts = \
host_realm.passively_checked_hosts or host.passive_checks_enabled
host_realm.actively_checked_hosts = \
                host_realm.actively_checked_hosts or host.active_checks_enabled
# Parse hostgroups for realms and set hostgroup in the default realm if no realm is set
hostgroups_realms_names = set()
logger.debug("Hostgroups realm configuration:")
for hostgroup in self.hostgroups:
if not getattr(hostgroup, 'realm', None):
hostgroup.realm = self.realms.default.get_name()
hostgroup.got_default_realm = True
hostgroup_realm = self.realms.find_by_name(hostgroup.realm)
if not hostgroup_realm:
self.add_error("The hostgroup '%s' is affected to an unknown realm: '%s'"
% (hostgroup.get_name(), hostgroup.realm))
continue
hostgroup.realm_name = hostgroup_realm.get_name()
hostgroup_realm.add_group_members(hostgroup.get_name())
logger.debug("- tagging hostgroup '%s' with realm %s",
hostgroup.get_name(), hostgroup.realm_name)
hostgroups_realms_names.add(hostgroup.realm_name)
# Check that all daemons and realms are coherent
for satellites_list in [self.pollers, self.brokers, self.reactionners,
self.receivers, self.schedulers]:
sat_class = satellites_list.inner_class
# Collect the names of all the realms that are managed by all the satellites
sat_realms_names = set()
for satellite in satellites_list:
for realm in self.realms:
realm_satellites = getattr(realm, '%ss' % satellite.type)
realm_potential_satellites = getattr(realm, 'potential_%ss' % satellite.type)
if satellite.uuid in realm_satellites or \
satellite.uuid in realm_potential_satellites:
sat_realms_names.add(realm.get_name())
if not hosts_realms_names.issubset(sat_realms_names):
# Check if a daemon is able to manage the concerned hosts...
for realm_name in hosts_realms_names.difference(sat_realms_names):
realm = self.realms.find_by_name(realm_name)
self.add_warning("Some hosts exist in the realm '%s' but no %s is "
"defined for this realm." % (realm_name, sat_class.my_type))
if not alignak_launched:
continue
# Add a self-generated daemon
logger.warning("Adding a %s for the realm: %s", satellite.type, realm_name)
new_daemon = sat_class({
'type': satellite.type, 'name': '%s-%s' % (satellite.type, realm_name),
'alignak_launched': True, 'missing_daemon': True,
'realm': realm.uuid, 'manage_sub_realms': '0', 'spare': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port
})
satellites_list.add_item(new_daemon)
# And tell the realm that it knows the satellite
realm_satellites = getattr(realm, '%ss' % satellite.type)
if new_daemon.uuid not in realm_satellites:
realm_satellites.append(new_daemon.uuid)
self.add_warning("Added a %s (%s, %s) for the realm '%s'"
% (satellite.type, '%s-%s' % (satellite.type, realm_name),
satellite.uri, realm_name))
self.daemons_initial_port = self.daemons_initial_port + 1
self.missing_daemons.append(new_daemon)
logger.debug("Realms hosts configuration:")
for realm in self.realms:
logger.debug("Realm: %s, actively checked hosts %s, passively checked hosts %s",
realm.get_name(), realm.actively_checked_hosts,
realm.passively_checked_hosts)
logger.info("Realm: %s, hosts: %s, groups: %s",
realm.get_name(), realm.members, realm.group_members)
# Log all satellites list
logger.debug("Alignak definitive daemons list:")
self.log_daemons_list()
def got_broker_module_type_defined(self, module_type):
"""Check if a module type is defined in one of the brokers
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
"""
for broker_link in self.brokers:
for module in broker_link.modules:
if module.is_a_module(module_type):
return True
return False
def got_scheduler_module_type_defined(self, module_type):
"""Check if a module type is defined in one of the schedulers
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined
"""
for scheduler_link in self.schedulers:
for module in scheduler_link.modules:
if module.is_a_module(module_type):
return True
return False
def got_arbiter_module_type_defined(self, module_type):
"""Check if a module type is defined in one of the arbiters
Also check the module name
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined:
"""
for arbiter in self.arbiters:
            # Do like the linkify will do afterwards...
for module in getattr(arbiter, 'modules', []):
                # So look at what the arbiter tries to load as a module
module_name = module.get_name()
# Ok, now look in modules...
for mod in self.modules:
                    # try to see if this module is of the expected type
if getattr(mod, 'python_name', '').strip() == module_type.strip():
                        # if so, check that the name matches too
if getattr(mod, 'name', '').strip() == module_name:
return True
return False
def create_business_rules(self):
"""Create business rules for hosts and services
:return: None
"""
self.hosts.create_business_rules(self.hosts, self.services,
self.hostgroups, self.servicegroups,
self.macromodulations, self.timeperiods)
self.services.create_business_rules(self.hosts, self.services,
self.hostgroups, self.servicegroups,
self.macromodulations, self.timeperiods)
def create_business_rules_dependencies(self):
"""Create business rules dependencies for hosts and services
:return: None
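
        Illustrative effect: for each host/service that has a business rule,
        every element involved in the rule gets this item appended to its
        act_depend_of_me list and parent/child dependency links are set both
        ways, so a state change of an element impacts the rule owner.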
"""
for item in itertools.chain(self.hosts, self.services):
if not getattr(item, 'got_business_rule', None):
continue
bp_items = item.business_rule.list_all_elements()
for bp_item_uuid in bp_items:
if bp_item_uuid in self.hosts:
bp_item = self.hosts[bp_item_uuid]
notif_options = item.business_rule_host_notification_options
else:
bp_item = self.services[bp_item_uuid]
notif_options = item.business_rule_service_notification_options
if notif_options:
bp_item.notification_options = notif_options
bp_item.act_depend_of_me.append((item.uuid, ['d', 'u', 's', 'f', 'c', 'w', 'x'],
'', True))
# Parent / children relations
if bp_item.uuid not in item.parent_dependencies:
item.parent_dependencies.append(bp_item.uuid)
if item.uuid not in bp_item.child_dependencies:
bp_item.child_dependencies.append(item.uuid)
def hack_old_nagios_parameters(self):
# pylint: disable=too-many-branches
""" Check if modules exist for some of the Nagios legacy parameters.
If no module of the required type is present, it alerts the user that the parameters will
be ignored and the functions will be disabled, else it encourages the user to set the
correct parameters in the installed modules.
Note that some errors are raised if some parameters are used and no module is found
to manage the corresponding feature.
        TODO: clean this part of the configuration checking! Nagios backward compatibility!
:return: modules list
:rtype: list
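
        The returned list contains tuples like (illustrative)::

            [('broker', {'name': 'inner-metrics', 'type': 'metrics', ...}),
             ('scheduler', {'name': 'inner-retention', 'type': 'retention', ...})]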
"""
modules = []
# For status_dat
if getattr(self, 'status_file', None) and getattr(self, 'object_cache_file', None):
msg = "The configuration parameters '%s = %s' and '%s = %s' are deprecated " \
"and will be ignored. Please configure your external 'retention' module " \
"as expected." % \
('status_file', self.status_file,
'object_cache_file', self.object_cache_file)
logger.warning(msg)
self.add_warning(msg)
# Now the log_file
if getattr(self, 'log_file', None):
msg = "The configuration parameter '%s = %s' is deprecated " \
"and will be ignored. Please configure your external 'logs' module " \
"as expected." % \
('log_file', self.log_file)
logger.warning(msg)
self.add_warning(msg)
# Now the syslog facility
if getattr(self, 'use_syslog', None):
msg = "The configuration parameter '%s = %s' is deprecated " \
"and will be ignored. Please configure your external 'logs' module " \
"as expected." % \
('use_syslog', self.use_syslog)
logger.warning(msg)
self.add_warning(msg)
# Now the host_perfdata or service_perfdata module
if getattr(self, 'service_perfdata_file', None) or \
getattr(self, 'host_perfdata_file', None):
msg = "The configuration parameters '%s = %s' and '%s = %s' are Nagios legacy " \
"parameters. Alignak will use its inner 'metrics' module " \
"to match the expected behavior." \
% ('host_perfdata_file', self.host_perfdata_file,
'service_perfdata_file', self.service_perfdata_file)
logger.warning(msg)
self.add_warning(msg)
mod_configuration = {
'name': 'inner-metrics',
'type': 'metrics',
'python_name': 'alignak.modules.inner_metrics',
'enabled': True
}
if getattr(self, 'host_perfdata_file', None):
mod_configuration['host_perfdata_file'] = \
getattr(self, 'host_perfdata_file')
if getattr(self, 'service_perfdata_file', None):
mod_configuration['service_perfdata_file'] = \
getattr(self, 'service_perfdata_file')
logger.debug("inner metrics module, configuration: %s", mod_configuration)
modules.append((
'broker', mod_configuration
))
# Now the Nagios legacy retention file module
if hasattr(self, 'retain_state_information') and self.retain_state_information:
# Do not raise a warning log for this, only an information
msg = "The configuration parameter '%s = %s' is a Nagios legacy " \
"parameter. Alignak will use its inner 'retention' module " \
"to match the expected behavior." \
% ('retain_state_information', self.retain_state_information)
logger.info(msg)
# self.add_warning(msg)
mod_configuration = {
'name': 'inner-retention',
'type': 'retention',
'python_name': 'alignak.modules.inner_retention',
'enabled': True
}
if getattr(self, 'state_retention_file', None) is not None:
mod_configuration['retention_file'] = getattr(self, 'state_retention_file')
if getattr(self, 'state_retention_dir', None) is not None:
mod_configuration['retention_dir'] = getattr(self, 'state_retention_dir')
if getattr(self, 'retention_update_interval', None):
self.tick_update_retention = int(self.retention_update_interval) * 60
mod_configuration['retention_period'] = int(self.retention_update_interval) * 60
logger.debug("inner retention module, configuration: %s", mod_configuration)
modules.append((
'scheduler', mod_configuration
))
# Now the command_file
if hasattr(self, 'command_file') and getattr(self, 'command_file'):
msg = "The configuration parameter '%s = %s' is deprecated " \
"and will be ignored. Please configure an external commands capable " \
"module as expected (eg external-commands, NSCA, or WS module may suit." \
% ('command_file', self.command_file)
logger.warning(msg)
self.add_warning(msg)
return modules
def propagate_timezone_option(self):
"""Set our timezone value and give it too to unset satellites
:return: None
"""
if self.use_timezone:
# first apply myself
os.environ['TZ'] = self.use_timezone
time.tzset()
tab = [self.schedulers, self.pollers, self.brokers, self.receivers, self.reactionners]
for sat_list in tab:
for sat in sat_list:
if sat.use_timezone == 'NOTSET':
setattr(sat, 'use_timezone', self.use_timezone)
def linkify_templates(self):
""" Like for normal object, we link templates with each others
:return: None
"""
self.timeperiods.linkify_templates()
self.contacts.linkify_templates()
self.hosts.linkify_templates()
self.services.linkify_templates()
self.hostdependencies.linkify_templates()
self.servicedependencies.linkify_templates()
self.hostsextinfo.linkify_templates()
self.servicesextinfo.linkify_templates()
self.escalations.linkify_templates()
self.serviceescalations.linkify_templates()
self.hostescalations.linkify_templates()
def check_error_on_hard_unmanaged_parameters(self):
"""Some parameters are just not managed like O*HP commands and regexp capabilities
:return: True if we encounter an error, otherwise False
:rtype: bool
"""
valid = True
if self.use_regexp_matching:
msg = "use_regexp_matching parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'failure_prediction_enabled', None):
msg = "failure_prediction_enabled parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'obsess_over_hosts', None):
msg = "obsess_over_hosts parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ochp_command', None):
msg = "ochp_command parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ochp_timeout', None):
msg = "ochp_timeout parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'obsess_over_services', None):
msg = "obsess_over_services parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ocsp_command', None):
msg = "ocsp_command parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ocsp_timeout', None):
msg = "ocsp_timeout parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
return valid
def is_correct(self): # pylint: disable=too-many-branches, too-many-statements, too-many-locals
"""Check if all elements got a good configuration
:return: True if the configuration is correct else False
:rtype: bool
"""
logger.info('Running pre-flight check on configuration data, initial state: %s',
self.conf_is_correct)
# valid = self.conf_is_correct
# Check if alignak_name is defined
if not self.alignak_name:
logger.info('Alignak name is not defined, using the main arbiter name...')
for arbiter in self.arbiters:
if not arbiter.spare:
self.alignak_name = arbiter.name
break
logger.info('Alignak name is: %s', self.alignak_name)
# Globally un-managed parameters
if not self.read_config_silent:
logger.info('Checking global parameters...')
# Old Nagios legacy unmanaged parameters
self.check_error_on_hard_unmanaged_parameters()
# If we got global event handlers, they should be valid
if self.global_host_event_handler and not self.global_host_event_handler.is_valid():
self.add_error("global host event_handler '%s' is invalid"
% self.global_host_event_handler.command)
if self.global_service_event_handler and not self.global_service_event_handler.is_valid():
self.add_error("global service event_handler '%s' is invalid"
% self.global_service_event_handler.command)
# If we got perfdata commands, they should be valid
if self.host_perfdata_command and not self.host_perfdata_command.is_valid():
self.add_error("host perfdata command '%s' is invalid"
% self.host_perfdata_command.command)
if self.service_perfdata_command and not self.service_perfdata_command.is_valid():
self.add_error("service perfdata command '%s' is invalid"
% self.service_perfdata_command.command)
if not self.read_config_silent:
logger.info('Checked')
if not self.read_config_silent:
logger.info('Checking monitoring configuration...')
classes = [strclss for _, _, strclss, _, _ in list(self.types_creations.values())]
for strclss in sorted(classes):
# todo: check why ignored!
if strclss in ['hostescalations', 'serviceescalations']:
logger.debug("Ignoring correctness check for '%s'...", strclss)
continue
if not self.read_config_silent:
logger.info('- checking %s...', strclss)
try:
checked_list = getattr(self, strclss)
except AttributeError: # pragma: no cover, simple protection
logger.info("\t%s are not present in the configuration", strclss)
continue
if not checked_list.is_correct():
if not self.read_config_silent:
logger.info('Checked %s, configuration is incorrect!', strclss)
self.add_error(checked_list.configuration_errors)
self.add_error("%s configuration is incorrect!" % strclss)
logger.error("%s configuration is incorrect!", strclss)
if checked_list.configuration_warnings:
self.add_warning(checked_list.configuration_warnings)
logger.info(" %d warning(s), total: %d",
len(checked_list.configuration_warnings),
len(self.configuration_warnings))
if not self.read_config_silent:
try:
dump_list = sorted(checked_list, key=lambda k: k.get_name())
except AttributeError: # pragma: no cover, simple protection
dump_list = checked_list
# Dump at DEBUG level because some tests break with INFO level, and it is not
                # really necessary to have information about each object
for cur_obj in dump_list:
if strclss == 'services':
logger.debug(' %s', cur_obj.get_full_name())
else:
logger.debug(' %s', cur_obj.get_name())
if checked_list:
logger.info(' checked %d', len(checked_list))
else:
logger.info(' none')
if not self.read_config_silent:
logger.info('Checked')
# Parse hosts and services for tags and realms
hosts_tag = set()
services_tag = set()
for host in self.hosts:
hosts_tag.add(host.poller_tag)
for service in self.services:
services_tag.add(service.poller_tag)
# Check that for each poller_tag of a host, a poller exists with this tag
pollers_tag = set()
for poller in self.pollers:
for tag in poller.poller_tags:
pollers_tag.add(tag)
if not hosts_tag.issubset(pollers_tag):
for tag in hosts_tag.difference(pollers_tag):
self.add_error("Error: some hosts have the poller_tag %s but no poller "
"has this tag" % tag)
if not services_tag.issubset(pollers_tag):
for tag in services_tag.difference(pollers_tag):
self.add_error("some services have the poller_tag %s but no poller "
"has this tag" % tag)
# Check that all hosts involved in business_rules are from the same realm
for item in self.hosts:
            if not getattr(item, 'got_business_rule', None):
                continue
realm = self.realms.find_by_name(item.realm)
if not realm:
                if item.realm in self.realms:
realm = self.realms[item.realm]
else:
# Something was wrong in the conf, will be raised elsewhere
continue
for elt_uuid in item.business_rule.list_all_elements():
if elt_uuid not in self.hosts:
# An error or a service element
continue
host = self.hosts[elt_uuid]
if host.realm not in self.realms:
# Something was wrong in the conf, will be raised elsewhere
continue
host_realm = self.realms[host.realm]
if host_realm.get_name() != realm.get_name():
logger.error("Business_rule '%s' got some hosts from another realm: %s",
item.get_full_name(), host_realm.get_name())
self.add_error("Error: Business_rule '%s' got hosts from another "
"realm: %s" % (item.get_full_name(), host_realm.get_name()))
# If configuration error messages exist, then the configuration is not valid
# Log the error messages
if self.configuration_errors:
logger.error("Configuration errors:")
for msg in self.configuration_errors:
logger.error(msg)
return self.conf_is_correct
def explode_global_conf(self):
"""Explode parameters like cached_service_check_horizon in the
Service class in a cached_check_horizon manner, o*hp commands etc
:return: None
"""
for cls, _, strclss, _, _ in list(self.types_creations.values()):
logger.debug("Applying global conf for the class '%s'...", strclss)
cls.load_global_conf(self)
def remove_templates(self):
"""Clean useless elements like templates because they are not needed anymore
:return: None
"""
self.timeperiods.remove_templates()
self.contacts.remove_templates()
self.hosts.remove_templates()
self.services.remove_templates()
self.hostdependencies.remove_templates()
self.servicedependencies.remove_templates()
self.hostsextinfo.remove_templates()
self.servicesextinfo.remove_templates()
self.escalations.remove_templates()
def show_errors(self):
"""
        Loop over configuration warnings and log them as WARNING logs
        Loop over configuration errors and log them as WARNING logs
        Note that the warnings and errors were already logged on the fly during the
        configuration parsing, so this function is only used as a sum-up at the end of the
        configuration parsing when an error has been detected.
:return: None
"""
if self.configuration_warnings:
logger.warning("Configuration warnings:")
for msg in self.configuration_warnings:
logger.warning(msg)
if self.configuration_errors:
logger.warning("Configuration errors:")
for msg in self.configuration_errors:
logger.warning(msg)
def create_packs(self):
# pylint: disable=too-many-statements,too-many-locals,too-many-branches, unused-argument
"""Create packs of hosts and services (all dependencies are resolved)
        It creates a graph: all hosts are connected to their
        parents, and hosts without a parent are connected to the host 'root'.
        Services are linked to their host. Dependencies between hosts/services are managed.
REF: doc/pack-creation.png
:return: None
"""
logger.info("- creating hosts packs for the realms:")
# We create a graph with host in nodes
graph = Graph()
graph.add_nodes(list(self.hosts.items.keys()))
# links will be used for relations between hosts
links = set()
# Now the relations
for host in self.hosts:
# Add parent relations
for parent in getattr(host, 'parents', []):
if parent:
links.add((parent, host.uuid))
# Add the others dependencies
for (dep, _, _, _) in host.act_depend_of:
links.add((dep, host.uuid))
for (dep, _, _, _, _) in host.chk_depend_of:
links.add((dep, host.uuid))
# For services: they are linked with their own host but we need
# to have the hosts of the service dependency in the same pack too
for service in self.services:
for (dep_id, _, _, _) in service.act_depend_of:
if dep_id in self.services:
dep = self.services[dep_id]
else:
dep = self.hosts[dep_id]
                # We only need the host of a service dependency:
                # a host dependency is already the host itself...
if hasattr(dep, 'host'):
links.add((dep.host, service.host))
# The other type of dep
for (dep_id, _, _, _, _) in service.chk_depend_of:
if dep_id in self.services:
dep = self.services[dep_id]
else:
dep = self.hosts[dep_id]
links.add((dep.host, service.host))
# For host/service that are business based, we need to link them too
for service in [srv for srv in self.services if srv.got_business_rule]:
for elem_uuid in service.business_rule.list_all_elements():
if elem_uuid in self.services:
elem = self.services[elem_uuid]
if elem.host != service.host: # do not link a host with itself
links.add((elem.host, service.host))
else: # it's already a host but only if it is in the known hosts list!
if elem_uuid in self.hosts and elem_uuid != service.host:
links.add((elem_uuid, service.host))
# Same for hosts of course
for host in [hst for hst in self.hosts if hst.got_business_rule]:
for elem_uuid in host.business_rule.list_all_elements():
if elem_uuid in self.services: # if it's a service
elem = self.services[elem_uuid]
if elem.host != host.uuid:
links.add((elem.host, host.uuid))
                else:  # elem is a host
if elem_uuid != host.uuid:
links.add((elem_uuid, host.uuid))
        # Now we create the links in the graph. Using the links set
        # ensures we call add_edge as few times as possible
for (dep, host) in links:
graph.add_edge(dep, host)
graph.add_edge(host, dep)
# Now We find the default realm
default_realm = self.realms.get_default()
        # The accessibility pack of a node is all the nodes that are connected
        # with it: each pack is one of our mini-packs
        # Now we check whether all the elements of each pack share the
        # same realm. If not, the configuration is wrong!
for hosts_pack in graph.get_accessibility_packs():
passively_checked_hosts = False
actively_checked_hosts = False
tmp_realms = set()
logger.debug(" - host pack hosts:")
for host_id in hosts_pack:
host = self.hosts[host_id]
if not host:
continue
logger.debug(" - %s", host.get_name())
passively_checked_hosts = passively_checked_hosts or host.passive_checks_enabled
actively_checked_hosts = actively_checked_hosts or host.active_checks_enabled
if host.realm:
tmp_realms.add(host.realm)
if len(tmp_realms) > 1:
self.add_error("Error: the realm configuration of your hosts is not correct "
"because there is more than one realm in one pack (host relations):")
for host_id in hosts_pack:
host = self.hosts[host_id]
if not host.realm:
                    self.add_error(' -> the host %s does not have a realm' % host.get_name())
else:
                    # Do not use get_name for the realm because it may not be an object but
                    # a string containing the name of a realm that was not found! In that
                    # case, calling get_name would raise an exception
if host.realm not in self.realms:
self.add_error(' -> the host %s is in the realm %s' %
(host.get_name(), host.realm))
else:
host_realm = self.realms[host.realm]
self.add_error(' -> the host %s is in the realm %s' %
(host.get_name(), host_realm.get_name()))
if len(tmp_realms) == 1: # Ok, good
tmp_realm = tmp_realms.pop()
if tmp_realm in self.realms:
realm = self.realms[tmp_realm]
else:
realm = self.realms.find_by_name(tmp_realm)
if not realm:
self.add_error(' -> some hosts are in an unknown realm %s!' % tmp_realm)
else:
# Set the current hosts pack to its realm
logger.debug(" - append pack %s to realm %s", hosts_pack, realm.get_name())
realm.packs.append(hosts_pack)
# Set if the realm only has passively or actively checked hosts...
realm.passively_checked_hosts = passively_checked_hosts
realm.actively_checked_hosts = actively_checked_hosts
elif not tmp_realms: # Hum... no realm value? So default Realm
if default_realm is not None:
# Set the current hosts pack to the default realm
default_realm.packs.append(hosts_pack)
else:
self.add_error("Error: some hosts do not have a realm and you did not "
"defined a default realm!")
for host in hosts_pack:
self.add_error(' Impacted host: %s ' % host.get_name())
        # The load balancing is done in a loop: all the hosts of a realm
        # (pack by pack) will be dispatched
        # to the schedulers of this realm
# REF: doc/pack-aggregation.png
# Count the numbers of elements in all the realms,
# to compare with the total number of hosts
nb_elements_all_realms = 0
for realm in self.realms:
packs = {}
            # Create a round-robin iterator over the scheduler ids so the
            # dispatching is load balanced within the realm.
            # A scheduler gets one entry in the round-robin tourniquet
            # per weight point (weighted round-robin)
weight_list = []
no_spare_schedulers = realm.schedulers
if not no_spare_schedulers:
if realm.potential_schedulers:
no_spare_schedulers = [realm.potential_schedulers[0]]
nb_schedulers = len(no_spare_schedulers)
if nb_schedulers:
logger.info(" %d scheduler(s) for the realm %s", nb_schedulers, realm.get_name())
else:
logger.warning(" no scheduler for the realm %s", realm.get_name())
# Maybe there is no scheduler in the realm, it can be a
# big problem if there are elements in packs
nb_elements = 0
for hosts_pack in realm.packs:
nb_elements += len(hosts_pack)
nb_elements_all_realms += len(hosts_pack)
realm.hosts_count = nb_elements
if nb_elements:
if not nb_schedulers:
self.add_error("The realm %s has %d hosts but no scheduler!"
% (realm.get_name(), nb_elements))
realm.packs = [] # Dumb pack
continue
logger.info(" %d hosts in the realm %s, distributed in %d linked packs",
nb_elements, realm.get_name(), len(realm.packs))
else:
logger.info(" no hosts in the realm %s", realm.get_name())
# Create a relation between a pack and each scheduler in the realm
packindex = 0
packindices = {}
for s_id in no_spare_schedulers:
scheduler = self.schedulers[s_id]
logger.debug(" scheduler: %s", scheduler.instance_id)
packindices[s_id] = packindex
packindex += 1
for i in range(0, scheduler.weight):
weight_list.append(s_id)
logger.debug(" pack indices: %s", packindices)
# packindices is indexed with the scheduler id and contains
# the configuration part number to get used: sched1:0, sched2: 1, ...
round_robin = itertools.cycle(weight_list)
# We must initialize nb_schedulers packs
for i in range(0, nb_schedulers):
packs[i] = []
# Try to load the history association dict so we will try to
# send the hosts in the same "pack"
assoc = {}
            # Now we explode the numerous mini-packs into the real packs:
            # we 'load balance' them in a round-robin way, but we also count the number
            # of hosts, in case some packs have too many hosts while others have only a few
realm.packs.sort(reverse=True)
pack_higher_hosts = 0
for hosts_pack in realm.packs:
valid_value = False
old_pack = -1
for host_id in hosts_pack:
host = self.hosts[host_id]
if not host:
continue
old_i = assoc.get(host.get_name(), -1)
                    # Maybe it's a new host; if so, don't count it
if old_i == -1:
continue
                    # Maybe it is the first one we look at; if so, take its value
if old_pack == -1 and old_i != -1:
old_pack = old_i
valid_value = True
continue
if old_i == old_pack:
valid_value = True
if old_i != old_pack:
valid_value = False
                # If it's a valid sub pack and the pack id really exists, use it!
if valid_value and old_pack in packindices:
i = old_pack
else:
if isinstance(i, int):
i = next(round_robin)
elif (len(packs[packindices[i]]) + len(hosts_pack)) >= pack_higher_hosts:
pack_higher_hosts = (len(packs[packindices[i]]) + len(hosts_pack))
i = next(round_robin)
for host_id in hosts_pack:
host = self.hosts[host_id]
if not host:
continue
packs[packindices[i]].append(host_id)
assoc[host.get_name()] = i
# Now packs is a dictionary indexed with the configuration part
# number and containing the list of hosts
realm.packs = packs
logger.info(" total number of hosts in all realms: %d", nb_elements_all_realms)
if len(self.hosts) != nb_elements_all_realms:
logger.warning("There are %d hosts defined, and %d hosts dispatched in the realms. "
"Some hosts have been ignored", len(self.hosts), nb_elements_all_realms)
self.add_error("There are %d hosts defined, and %d hosts dispatched in the realms. "
"Some hosts have been "
"ignored" % (len(self.hosts), nb_elements_all_realms))
def cut_into_parts(self):
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
"""Cut conf into part for scheduler dispatch.
Basically it provides a set of host/services for each scheduler that
have no dependencies between them
:return: None
"""
# User must have set a spare if he needed one
logger.info("Splitting the configuration into parts:")
nb_parts = 0
for realm in self.realms:
no_spare_schedulers = realm.schedulers
if not no_spare_schedulers:
if realm.potential_schedulers:
no_spare_schedulers = [realm.potential_schedulers[0]]
nb_schedulers = len(no_spare_schedulers)
nb_parts += nb_schedulers
if nb_schedulers:
logger.info(" %d scheduler(s) for the realm %s", nb_schedulers, realm.get_name())
else:
logger.warning(" no scheduler for the realm %s", realm.get_name())
if nb_parts == 0:
nb_parts = 1
        # We create dummy configurations for the schedulers:
        # they are clones of the master configuration but without hosts and
        # services (because these are split between the configuration parts)
logger.info("Splitting the configuration into %d parts...", nb_parts)
self.parts = {}
for part_index in range(0, nb_parts):
self.parts[part_index] = Config()
# Now we copy all properties of conf into the new ones
for prop, entry in sorted(list(Config.properties.items())):
# Do not copy the configuration instance id nor name!
if prop in ['instance_id', 'config_name']:
continue
# Only the one that are managed and used
if entry.managed and not isinstance(entry, UnusedProp):
val = getattr(self, prop, None)
setattr(self.parts[part_index], prop, val)
# Set the cloned configuration name
self.parts[part_index].config_name = "%s (%d)" % (self.config_name, part_index)
logger.debug("- cloning configuration: %s -> %s",
self.parts[part_index].config_name, self.parts[part_index])
# Copy the configuration objects lists. We need a deepcopy because each configuration
# will have some new groups... but we create a new uuid
self.parts[part_index].uuid = get_a_new_object_id()
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, clss, inner_property, _, clonable) = types_creations[o_type]
if not clonable:
logger.debug(" . do not clone: %s", inner_property)
continue
# todo: Indeed contactgroups should be managed like hostgroups... to be confirmed!
if inner_property in ['hostgroups', 'servicegroups']:
new_groups = []
for group in getattr(self, inner_property):
new_groups.append(group.copy_shell())
setattr(self.parts[part_index], inner_property, clss(new_groups))
elif inner_property in ['hosts']:
setattr(self.parts[part_index], inner_property, clss([]))
# And include the templates
setattr(self.parts[part_index].hosts, 'templates', self.hosts.templates)
elif inner_property in ['services']:
setattr(self.parts[part_index], inner_property, clss([]))
# And include the templates
setattr(self.parts[part_index].services, 'templates', self.services.templates)
else:
setattr(self.parts[part_index], inner_property, getattr(self, inner_property))
logger.debug(" . cloned %s: %s -> %s", inner_property,
getattr(self, inner_property),
getattr(self.parts[part_index], inner_property))
            # The elements of the other configuration parts will be tagged here
self.parts[part_index].other_elements = {}
# No scheduler has yet accepted the configuration
self.parts[part_index].is_assigned = False
self.parts[part_index].scheduler_link = None
self.parts[part_index].push_flavor = ''
# Once parts got created, the current configuration has some 'parts'
# self.parts is the configuration split into parts for the schedulers
        # Just create the packs; there can be numerous ones.
        # A pack contains hosts and services, and the packs belong to the realms
logger.debug("Creating packs for realms...")
self.create_packs()
# Once packs got created, all the realms have some 'packs'
logger.info("Realms:")
for realm in self.realms:
logger.info(" - realm: %s", realm)
for idx in realm.packs:
logger.info(" - pack: %s / %d hosts (%s)",
idx, len(realm.packs[idx]), ','.join([self.hosts[host_id].get_name()
for host_id in realm.packs[idx]]))
# We have packs for realms and elements into configurations, let's merge this...
logger.info("Realms:")
offset = 0
for realm in self.realms:
logger.info(" Realm: %s", realm)
for idx in realm.packs:
logger.info(" - pack: %s / %d hosts", idx, len(realm.packs[idx]))
if not realm.packs[idx]:
logger.info(" - no hosts are declared in this realm pack.")
# continue
try:
instance_id = self.parts[idx + offset].instance_id
for host_id in realm.packs[idx]:
host = self.hosts[host_id]
self.parts[idx + offset].hosts.add_item(host)
for service_id in host.services:
service = self.services[service_id]
self.parts[idx + offset].services.add_item(service)
# Now the conf can be linked with the realm
realm.parts.update({instance_id: self.parts[idx + offset]})
# offset += 1
except KeyError:
logger.info(" - no configuration part is affected "
"because of mismatching hosts packs / schedulers count. "
"Probably too much schedulers for the hosts count!")
offset += len(realm.packs)
del realm.packs
        # Hosts and services are now dispatched; we still need the groups
        # (hostgroups and servicegroups) rebuilt inside each part
for part_index in self.parts:
cfg = self.parts[part_index]
# Fill host groups
for ori_hg in self.hostgroups:
hostgroup = cfg.hostgroups.find_by_name(ori_hg.get_name())
mbrs_id = []
for host in ori_hg.members:
if host != '':
mbrs_id.append(host)
for host in cfg.hosts:
if host.uuid in mbrs_id:
hostgroup.members.append(host.uuid)
# And also relink the hosts with the valid hostgroups
for item in cfg.hosts:
orig_hgs = item.hostgroups
nhgs = []
for ohg_id in orig_hgs:
ohg = self.hostgroups[ohg_id]
nhg = cfg.hostgroups.find_by_name(ohg.get_name())
nhgs.append(nhg.uuid)
item.hostgroups = nhgs
# Fill servicegroup
for ori_sg in self.servicegroups:
servicegroup = cfg.servicegroups.find_by_name(ori_sg.get_name())
mbrs = ori_sg.members
mbrs_id = []
for service in mbrs:
if service != '':
mbrs_id.append(service)
for service in cfg.services:
if service.uuid in mbrs_id:
servicegroup.members.append(service.uuid)
# And also relink the services with the valid servicegroups
            for item in cfg.services:
                orig_sgs = item.servicegroups
                nsgs = []
                for osg_id in orig_sgs:
                    osg = self.servicegroups[osg_id]
                    nsg = cfg.servicegroups.find_by_name(osg.get_name())
                    nsgs.append(nsg.uuid)
                item.servicegroups = nsgs
        # Now we fill other_elements by host (services go with their host,
        # so they are not tagged)
logger.info("Configuration parts:")
for part_index in self.parts:
for host in self.parts[part_index].hosts:
                for j in [j for j in self.parts if j != part_index]:  # So other than part_index
                    self.parts[j].other_elements[host.get_name()] = part_index
logger.info("- part: %d - %s, %d hosts", part_index, self.parts[part_index],
len(self.parts[part_index].hosts))
def prepare_for_sending(self):
"""The configuration needs to be serialized before being sent to a spare arbiter
:return: None
"""
if [arbiter_link for arbiter_link in self.arbiters if arbiter_link.spare]:
logger.info('Serializing the configuration for my spare arbiter...')
# Now serialize the whole configuration, for sending to spare arbiters
self.spare_arbiter_conf = serialize(self)
def dump(self, dump_file_name=None):
"""Dump configuration to a file in a JSON format
If no file name is provided, the function returns an object that can be json-ified
:param dump_file_name: the file to dump configuration to
:type dump_file_name: str
        :return: the configuration dump as a dict if no file name is provided,
            else the JSON string that was written to the file
"""
config_dump = {}
for _, _, category, _, _ in list(self.types_creations.values()):
try:
# Dump without the running properties and filter base properties
objs = [
jsonify_r(i, running_properties=False,
filter_base_properties=True) for i in getattr(self, category)
]
except (TypeError, AttributeError): # pragma: no cover, simple protection
logger.warning("Dumping configuration, '%s' not present in the configuration",
category)
continue
container = getattr(self, category)
if category == "services":
objs = sorted(objs,
key=lambda o: "%s/%s" % (o["host_name"], o["service_description"]))
elif hasattr(container, "name_property"):
# pylint: disable=cell-var-from-loop
objs = sorted(objs,
key=lambda o, prop=container.name_property: getattr(o, prop, ''))
config_dump[category] = objs
if not dump_file_name:
return config_dump
try:
logger.info('Dumping configuration to: %s', dump_file_name)
            with open(dump_file_name, "w") as fd:
                fd.write(json.dumps(config_dump, ensure_ascii=False, sort_keys=True,
                                    indent=2, separators=(', ', ': '),
                                    default=default_serialize))
logger.info('Dumped')
except (OSError, IndexError) as exp: # pragma: no cover, should never happen...
logger.critical("Error when dumping configuration to %s: %s",
dump_file_name, str(exp))
return json.dumps(config_dump, ensure_ascii=False, sort_keys=True,
indent=2, separators=(', ', ': '), default=default_serialize)
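The graph-based pack creation in create_packs above can be illustrated with a minimal standalone sketch; the host names and relations below are hypothetical, and the simple BFS stands in for the alignak Graph API. Hosts are nodes, parent/dependency relations are undirected edges, and each connected component becomes one pack that must live in a single realm.

from collections import defaultdict, deque

edges = [('h1', 'h2'), ('h2', 'h3'), ('h4', 'h5')]  # hypothetical host relations
nodes = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}

neighbours = defaultdict(set)
for left, right in edges:
    neighbours[left].add(right)
    neighbours[right].add(left)

def accessibility_packs():
    # Each connected component of the relation graph is one hosts pack
    seen, packs = set(), []
    for node in sorted(nodes):
        if node in seen:
            continue
        pack, queue = set(), deque([node])
        while queue:
            current = queue.popleft()
            if current in seen:
                continue
            seen.add(current)
            pack.add(current)
            queue.extend(neighbours[current] - seen)
        packs.append(pack)
    return packs

print(accessibility_packs())  # [{'h1', 'h2', 'h3'}, {'h4', 'h5'}, {'h6'}]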
|
class Config(Item):
'''Config is the class that reads, loads and manipulates the main Alignak monitored
    objects configuration. It reads the Nagios legacy configuration files (cfg files)
and gets all information from these files.
It creates the monitored objects (eg. hosts, contacts, ...), creates links between
them, check them, clean them, and cut them into independent parts.
The main user of this Config class is the Arbiter daemon when it loads the
configuration and dispatches to the other daemons.'''
def __init__(self, params=None, parsing=True):
pass
def __repr__(self):
pass
def serialize(self, no_json=True, printing=False):
pass
def clean_params(self, params):
'''Convert a list of parameters (key=value) into a dict
This function is used to transform Nagios (or ini) like formated parameters (key=value)
to a dictionary.
:param params: parameters list
:type params: list
:return: dict with key and value. Log error if malformed
:rtype: dict
'''
pass
def load_params(self, params):
'''Load parameters from main configuration file
:param params: parameters list (converted right at the beginning)
:type params:
:return: None
'''
pass
@staticmethod
def _cut_line(line):
'''Split the line on whitespaces and remove empty chunks
:param line: the line to split
:type line: str
:return: list of strings
:rtype: list
'''
pass
def read_legacy_cfg_files(self, cfg_files, alignak_env_files=None):
'''Read and parse the Nagios legacy configuration files
and store their content into a StringIO object which content
will be returned as the function result
:param cfg_files: list of file to read
:type cfg_files: list
:param alignak_env_files: name of the alignak environment file
:type alignak_env_files: list
:return: a buffer containing all files
:rtype: str
'''
pass
def read_config_buf(self, cfg_buffer):
        '''Parse the legacy configuration buffer (previously returned by read_legacy_cfg_files)
        If the buffer is empty, it will return an empty dictionary else it will return a
        dictionary containing dictionary items that may be used to create Alignak
objects
:param cfg_buffer: buffer containing all data from config files
:type cfg_buffer: str
:return: dict of alignak objects with the following structure ::
{ type1 : [{key: value, ..}, {..}],
type2 : [ ... ]
}
Example ::
{ 'host' : [{'host_name': 'myhostname', ..}, {..}],
'service' : [ ... ]
}
Values are all str for now. It is pythonized at object creation
:rtype: dict
'''
pass
@staticmethod
def add_self_defined_objects(raw_objects):
'''Add self defined command objects for internal processing ;
        bp_rule, _internal_host_up, _echo, _internal_host_check, _internal_service_check
:param raw_objects: Raw config objects dict
:type raw_objects: dict
:return: raw_objects with some more commands
:rtype: dict
'''
pass
def early_create_objects(self, raw_objects):
'''Create the objects needed for the post configuration file initialization
:param raw_objects: dict with all object with str values
:type raw_objects: dict
:return: None
'''
pass
def create_objects(self, raw_objects):
'''Create all the objects got after the post configuration file initialization
:param raw_objects: dict with all object with str values
:type raw_objects: dict
:return: None
'''
pass
def create_objects_for_type(self, raw_objects, o_type):
'''Generic function to create objects regarding the o_type
This function create real Alignak objects from the raw data got from the configuration.
:param raw_objects: Raw objects
:type raw_objects: dict
:param o_type: the object type we want to create
:type o_type: object
:return: None
'''
pass
def early_arbiter_linking(self, arbiter_name, params):
''' Prepare the arbiter for early operations
:param arbiter_name: default arbiter name if no arbiter exist in the configuration
:type arbiter_name: str
:return: None
'''
pass
def linkify_one_command_with_commands(self, commands, prop):
'''
Link a command call (executable) with a configured commad
:param commands: object commands
:type commands: alignak.objects.command.Commands
:param prop: property name
:type prop: str
:return: None
'''
pass
    def linkify(self):
        ''' Make 'links' between elements, like a host got a services list
        with all its services in it
        :return: None
        '''
        pass
    def clean(self):
        '''Wrapper for calling the clean method of all the configuration objects
        :return: None
        '''
        pass
def warn_about_unmanaged_parameters(self):
'''used to raise warning if the user got parameter
that we do not manage from now
:return: None
'''
pass
def override_properties(self):
'''Wrapper for calling override_properties method of services attribute
:return:
'''
pass
def explode(self):
'''Use to fill groups values on hosts and create new services
(for host group ones)
:return: None
'''
pass
def apply_dependencies(self):
'''Creates dependencies links between elements.
:return: None
'''
pass
def apply_inheritance(self):
'''Apply inheritance from the templates
Templates can be used in the following objects:
* hosts
* contacts
* services
* services dependencies
* hosts dependencies
* timeperiods
* hosts extinfo
* services extinfo
* service escalations
* host escalations
* escalations
:return: None
'''
pass
def apply_implicit_inheritance(self):
'''Wrapper for calling apply_implicit_inheritance method of services attributes
Implicit inheritance is between host and service (like notification parameters etc)
:return:None
'''
pass
def fill_default_configuration(self):
'''Fill objects properties with default value if necessary
:return: None
'''
pass
def fill_default_realm(self):
        '''Check if a realm is defined; if not,
        create a new one (default) and tag every element that does not have
        a realm property to be put in this realm
:return: None
'''
pass
def log_daemons_list(self):
'''Log Alignak daemons list
:return:
'''
pass
def fill_default_satellites(self, alignak_launched=False):
'''If a required satellite is missing in the configuration, we create a new satellite
on localhost with some default values
:param alignak_launched: created daemons are to be launched or not
:type alignak_launched: bool
:return: None
'''
pass
def got_broker_module_type_defined(self, module_type):
'''Check if a module type is defined in one of the brokers
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
'''
pass
def got_scheduler_module_type_defined(self, module_type):
'''Check if a module type is defined in one of the schedulers
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined
'''
pass
def got_arbiter_module_type_defined(self, module_type):
'''Check if a module type is defined in one of the arbiters
Also check the module name
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined:
'''
pass
def create_business_rules(self):
'''Create business rules for hosts and services
:return: None
'''
pass
def create_business_rules_dependencies(self):
'''Create business rules dependencies for hosts and services
:return: None
'''
pass
def hack_old_nagios_parameters(self):
''' Check if modules exist for some of the Nagios legacy parameters.
If no module of the required type is present, it alerts the user that the parameters will
be ignored and the functions will be disabled, else it encourages the user to set the
correct parameters in the installed modules.
Note that some errors are raised if some parameters are used and no module is found
to manage the corresponding feature.
TODO: clean this part of the configuration checking! Nagios ascending compatibility!
:return: modules list
:rtype: list
'''
pass
def propagate_timezone_option(self):
'''Set our timezone value and give it too to unset satellites
:return: None
'''
pass
def linkify_templates(self):
''' Like for normal object, we link templates with each others
:return: None
'''
pass
def check_error_on_hard_unmanaged_parameters(self):
'''Some parameters are just not managed like O*HP commands and regexp capabilities
:return: True if we encounter an error, otherwise False
:rtype: bool
'''
pass
def is_correct(self):
'''Check if all elements got a good configuration
:return: True if the configuration is correct else False
:rtype: bool
'''
pass
def explode_global_conf(self):
        '''Explode global parameters into the relevant classes: for instance,
        cached_service_check_horizon becomes cached_check_horizon in the Service class,
        and similarly for the o*hp commands, etc.
:return: None
'''
pass
def remove_templates(self):
'''Clean useless elements like templates because they are not needed anymore
:return: None
'''
pass
def show_errors(self):
'''
        Loop over configuration warnings and log them as WARNING logs
        Loop over configuration errors and log them as WARNING logs
        Note that the warnings and errors were already logged on the fly during the
        configuration parsing, so this function is only used as a sum-up at the end of the
        configuration parsing when an error has been detected.
:return: None
'''
pass
def create_packs(self):
'''Create packs of hosts and services (all dependencies are resolved)
        It creates a graph: all hosts are connected to their
        parents, and hosts without a parent are connected to the host 'root'.
        Services are linked to their host. Dependencies between hosts/services are managed.
REF: doc/pack-creation.png
:return: None
'''
pass
def cut_into_parts(self):
        '''Cut the configuration into parts for the scheduler dispatch.
        Basically it provides, for each scheduler, a set of hosts/services
        that have no dependencies with the other parts
:return: None
'''
pass
def prepare_for_sending(self):
'''The configuration needs to be serialized before being sent to a spare arbiter
:return: None
'''
pass
def dump(self, dump_file_name=None):
'''Dump configuration to a file in a JSON format
If no file name is provided, the function returns an object that can be json-ified
:param dump_file_name: the file to dump configuration to
:type dump_file_name: str
        :return: the configuration dump as a dict if no file name is provided,
            else the JSON string that was written to the file
'''
pass
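A minimal standalone sketch of the weighted round-robin that create_packs uses to spread host packs over a realm's schedulers; the scheduler names and weights below are assumptions for illustration only.

import itertools

scheduler_weights = {'scheduler-1': 2, 'scheduler-2': 1}  # hypothetical weights
weight_list = []
for scheduler_id, weight in scheduler_weights.items():
    # One tourniquet entry per weight point: heavier schedulers come up more often
    weight_list.extend([scheduler_id] * weight)

round_robin = itertools.cycle(weight_list)
print([next(round_robin) for _ in range(6)])
# ['scheduler-1', 'scheduler-1', 'scheduler-2',
#  'scheduler-1', 'scheduler-1', 'scheduler-2']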
| 46 | 41 | 51 | 6 | 33 | 12 | 9 | 0.34 | 1 | 33 | 20 | 0 | 41 | 23 | 43 | 77 | 3,029 | 473 | 1,933 | 291 | 1,887 | 648 | 1,188 | 287 | 1,144 | 60 | 3 | 8 | 375 |
4,104 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/contact.py
|
alignak.objects.contact.Contact
|
class Contact(Item):
"""Host class implements monitoring concepts for contact.
For example it defines host_notification_period, service_notification_period etc.
"""
my_type = 'contact'
my_name_property = "%s_name" % my_type
properties = Item.properties.copy()
properties.update({
'contact_name':
StringProp(fill_brok=[FULL_STATUS]),
'alias':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'contactgroups':
ListProp(default=[], fill_brok=[FULL_STATUS]),
# Those properties must be identical to the corresponding properties
# of the Notificationway object
'host_notifications_enabled':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'service_notifications_enabled':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'host_notification_period':
StringProp(default='', fill_brok=[FULL_STATUS]),
'service_notification_period':
StringProp(default='', fill_brok=[FULL_STATUS]),
'host_notification_options':
ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True),
'service_notification_options':
ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True),
'host_notification_commands':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'service_notification_commands':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'min_business_impact':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'email':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'pager':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'address1':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'address2':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'address3':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'address4':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'address5':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'address6':
StringProp(default=u'none', fill_brok=[FULL_STATUS]),
'can_submit_commands':
BoolProp(default=False, fill_brok=[FULL_STATUS]),
'is_admin':
BoolProp(default=False, fill_brok=[FULL_STATUS]),
'expert':
BoolProp(default=False, fill_brok=[FULL_STATUS]),
'retain_status_information':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'notificationways':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'password':
StringProp(default=u'NOPASSWORDSET', fill_brok=[FULL_STATUS]),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'modified_attributes':
IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True),
'modified_host_attributes':
IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True),
'modified_service_attributes':
IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True),
'in_scheduled_downtime':
BoolProp(default=False, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True),
'broks':
ListProp(default=[]), # and here broks raised
'customs':
DictProp(default={}, fill_brok=[FULL_STATUS]),
})
    # This table is used to transform old parameter names into new ones,
    # e.g. from the Nagios2 format to the Nagios3 one,
    # or Alignak deprecated names like criticity
old_properties = {
'min_criticity': 'min_business_impact',
}
macros = {
'CONTACTNAME': 'contact_name',
'CONTACTALIAS': 'alias',
'CONTACTEMAIL': 'email',
'CONTACTPAGER': 'pager',
'CONTACTADDRESS1': 'address1',
'CONTACTADDRESS2': 'address2',
'CONTACTADDRESS3': 'address3',
'CONTACTADDRESS4': 'address4',
'CONTACTADDRESS5': 'address5',
'CONTACTADDRESS6': 'address6',
'CONTACTGROUPNAME': 'get_groupname',
'CONTACTGROUPNAMES': 'get_groupnames'
}
special_properties = (
'service_notification_commands', 'host_notification_commands',
'service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'contact_name'
)
simple_way_parameters = (
'service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'service_notification_commands', 'host_notification_commands',
'min_business_impact'
)
def __init__(self, params, parsing=True):
# When deserialized, those are dict
if not parsing:
for prop in ['service_notification_commands', 'host_notification_commands']:
if prop not in params:
continue
# We recreate the list of objects
new_list = [unserialize(elem, True) for elem in params[prop]]
setattr(self, prop, new_list)
                # And remove the prop, to prevent it from being overridden
del params[prop]
super(Contact, self).__init__(params, parsing=parsing)
def __str__(self): # pragma: no cover
return '<Contact%s %s, uuid=%s, use: %s />' \
% (' template' if self.is_a_template() else '', self.get_full_name(), self.uuid,
getattr(self, 'tags', None))
__repr__ = __str__
def get_full_name(self):
"""Get the full name of the contact
        :return: contact full name
:rtype: str
"""
name = self.get_name()
if getattr(self, 'display_name', None):
name = "({}) {}".format(getattr(self, 'display_name'), name)
elif getattr(self, 'alias', None) and getattr(self, 'alias', None) != 'none':
name = "({}) {}".format(getattr(self, 'alias'), name)
return name
def get_groupname(self):
"""
        Get the name of the first group the contact belongs to
:return: group name
:rtype: str
"""
if self.contactgroups:
return self.contactgroups[0]
return 'Unknown'
def get_groupnames(self):
"""
        Get the names of all the groups the contact belongs to
:return: comma separated list of the groups names
:rtype: str
"""
if self.contactgroups:
return ', '.join(self.contactgroups)
return 'Unknown'
def want_service_notification(self, notifways, timeperiods,
timestamp, state, n_type, business_impact, cmd=None):
"""Check if notification options match the state of the service
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
"""
if not self.service_notifications_enabled:
return False
# If we are in downtime, we do not want notification
for downtime_id in self.downtimes:
downtime = self.downtimes[downtime_id]
if downtime.is_in_effect:
self.in_scheduled_downtime = True
return False
self.in_scheduled_downtime = False
        # Now check the sub notification ways; if one of them is OK, we are OK.
        # We will filter in another phase
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
nw_b = notifway.want_service_notification(timeperiods, timestamp,
state, n_type, business_impact, cmd)
if nw_b:
return True
# Oh... no one is ok for it? so no, sorry
return False
def want_host_notification(self, notifways, timeperiods, timestamp, state, n_type,
business_impact, cmd=None):
"""Check if notification options match the state of the host
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("UP", "DOWN" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this host
:type business_impact: int
:param cmd: command launch to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
"""
if not self.host_notifications_enabled:
return False
# If we are in downtime, we do not want notification
        for downtime_id in self.downtimes:
            downtime = self.downtimes[downtime_id]
            if downtime.is_in_effect:
self.in_scheduled_downtime = True
return False
self.in_scheduled_downtime = False
        # Now check the sub notification ways; if one of them is OK, we are OK.
        # We will filter in another phase
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
nw_b = notifway.want_host_notification(timeperiods, timestamp,
state, n_type, business_impact, cmd)
if nw_b:
return True
# Oh, nobody..so NO :)
return False
def get_notification_commands(self, notifways, n_type, command_name=False):
"""Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command]
"""
res = []
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
res.extend(notifway.get_notification_commands(n_type))
# Update inner notification commands property with command name or command
setattr(self, n_type + '_notification_commands', res)
if command_name:
setattr(self, n_type + '_notification_commands', [c.get_name() for c in res])
return res
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
# Internal checks before executing inherited function...
if not hasattr(self, 'contact_name'):
if hasattr(self, 'alias'):
# Use the alias if we miss the contact_name
self.contact_name = self.alias
        # There is a case where the contact has no notification way: when none
        # of the special properties is defined at all!
if not getattr(self, 'notificationways', None):
for prop in self.special_properties:
if not hasattr(self, prop):
self.add_error("[contact::%s] %s property is missing"
% (self.get_name(), prop))
for char in self.__class__.illegal_object_name_chars:
if char not in self.contact_name:
continue
self.add_error("[contact::%s] %s character not allowed in contact_name"
% (self.get_name(), char))
return super(Contact, self).is_correct() and self.conf_is_correct
def raise_enter_downtime_log_entry(self):
"""Raise CONTACT DOWNTIME ALERT entry (info level)
Format is : "CONTACT DOWNTIME ALERT: *get_name()*;STARTED;
Contact has entered a period of scheduled downtime"
Example : "CONTACT DOWNTIME ALERT: test_contact;STARTED;
Contact has entered a period of scheduled downtime"
:return: None
"""
brok = make_monitoring_log(
'info', "CONTACT DOWNTIME ALERT: %s;STARTED; "
"Contact has entered a period of scheduled downtime" % self.get_name()
)
self.broks.append(brok)
def raise_exit_downtime_log_entry(self):
"""Raise CONTACT DOWNTIME ALERT entry (info level)
Format is : "CONTACT DOWNTIME ALERT: *get_name()*;STOPPED;
Contact has entered a period of scheduled downtime"
Example : "CONTACT DOWNTIME ALERT: test_contact;STOPPED;
Contact has entered a period of scheduled downtime"
:return: None
"""
brok = make_monitoring_log(
'info', "CONTACT DOWNTIME ALERT: %s;STOPPED; "
"Contact has exited from a period of scheduled downtime" % self.get_name()
)
self.broks.append(brok)
def raise_cancel_downtime_log_entry(self):
"""Raise CONTACT DOWNTIME ALERT entry (info level)
Format is : "CONTACT DOWNTIME ALERT: *get_name()*;CANCELLED;
Contact has entered a period of scheduled downtime"
Example : "CONTACT DOWNTIME ALERT: test_contact;CANCELLED;
Contact has entered a period of scheduled downtime"
:return: None
"""
brok = make_monitoring_log(
'info', "CONTACT DOWNTIME ALERT: %s;CANCELLED; "
"Scheduled downtime for contact has been cancelled." % self.get_name()
)
self.broks.append(brok)
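The delegation pattern used by want_service_notification and want_host_notification above (the contact is notified as soon as one of its notification ways accepts) boils down to the following sketch; DemoNotificationWay is a hypothetical stand-in for the real NotificationWay objects.

class DemoNotificationWay(object):
    def __init__(self, options):
        self.options = options  # accepted short states, e.g. ['w', 'c']

    def want_service_notification(self, state_short):
        return state_short in self.options

def contact_wants_notification(notification_ways, state_short):
    # True as soon as at least one notification way accepts this state
    return any(way.want_service_notification(state_short) for way in notification_ways)

ways = [DemoNotificationWay(['c']), DemoNotificationWay(['w', 'c'])]
print(contact_wants_notification(ways, 'w'))  # True - the second way accepts WARNING
print(contact_wants_notification(ways, 'u'))  # False - no way accepts UNKNOWN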
|
class Contact(Item):
    '''Contact class implements monitoring concepts for contacts.
    For example it defines host_notification_period, service_notification_period etc.
'''
def __init__(self, params, parsing=True):
pass
def __str__(self):
pass
def get_full_name(self):
'''Get the full name of the contact
        :return: contact full name
:rtype: str
'''
pass
def get_groupname(self):
'''
        Get the name of the first group the contact belongs to
:return: group name
:rtype: str
'''
pass
def get_groupnames(self):
'''
        Get the names of all the groups the contact belongs to
:return: comma separated list of the groups names
:rtype: str
'''
pass
def want_service_notification(self, notifways, timeperiods,
timestamp, state, n_type, business_impact, cmd=None):
'''Check if notification options match the state of the service
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
'''
pass
def want_host_notification(self, notifways, timeperiods, timestamp, state, n_type,
business_impact, cmd=None):
'''Check if notification options match the state of the host
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("UP", "DOWN" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this host
:type business_impact: int
:param cmd: command launch to notify the contact
:type cmd: str
:return: True if contact wants notification, otherwise False
:rtype: bool
'''
pass
def get_notification_commands(self, notifways, n_type, command_name=False):
'''Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command]
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
def raise_enter_downtime_log_entry(self):
'''Raise CONTACT DOWNTIME ALERT entry (info level)
Format is : "CONTACT DOWNTIME ALERT: *get_name()*;STARTED;
Contact has entered a period of scheduled downtime"
Example : "CONTACT DOWNTIME ALERT: test_contact;STARTED;
Contact has entered a period of scheduled downtime"
:return: None
'''
pass
def raise_exit_downtime_log_entry(self):
'''Raise CONTACT DOWNTIME ALERT entry (info level)
Format is : "CONTACT DOWNTIME ALERT: *get_name()*;STOPPED;
Contact has entered a period of scheduled downtime"
Example : "CONTACT DOWNTIME ALERT: test_contact;STOPPED;
Contact has entered a period of scheduled downtime"
:return: None
'''
pass
def raise_cancel_downtime_log_entry(self):
'''Raise CONTACT DOWNTIME ALERT entry (info level)
Format is : "CONTACT DOWNTIME ALERT: *get_name()*;CANCELLED;
Contact has entered a period of scheduled downtime"
Example : "CONTACT DOWNTIME ALERT: test_contact;CANCELLED;
Contact has entered a period of scheduled downtime"
:return: None
'''
pass
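As a side note on the downtime check in want_host_notification: the downtimes running property is a dict keyed by downtime id, so the objects must be looked up before reading is_in_effect. A tiny sketch with hypothetical objects:

class DemoDowntime(object):
    def __init__(self, in_effect):
        self.is_in_effect = in_effect

downtimes = {'dt-1': DemoDowntime(False), 'dt-2': DemoDowntime(True)}

# Iterating the dict yields the ids, not the downtime objects
in_scheduled_downtime = any(downtimes[dt_id].is_in_effect for dt_id in downtimes)
print(in_scheduled_downtime)  # True - dt-2 is in effect, so notifications are muted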
| 13 | 11 | 19 | 2 | 9 | 8 | 3 | 0.51 | 1 | 1 | 0 | 0 | 12 | 2 | 12 | 46 | 355 | 45 | 206 | 46 | 191 | 106 | 97 | 44 | 84 | 8 | 3 | 3 | 39 |
4,105 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/contactgroup.py
|
alignak.objects.contactgroup.Contactgroup
|
class Contactgroup(Itemgroup):
"""Class to manage a group of contacts
A Contactgroup is used to manage a group of contacts
"""
my_type = 'contactgroup'
my_name_property = "%s_name" % my_type
members_property = "members"
group_members_property = "%s_members" % my_type
properties = Itemgroup.properties.copy()
properties.update({
'contactgroup_name':
StringProp(fill_brok=[FULL_STATUS]),
'alias':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'contactgroup_members':
ListProp(default=[], fill_brok=[FULL_STATUS], merging='join', split_on_comma=True)
})
macros = {
'CONTACTGROUPNAME': 'contactgroup_name',
'CONTACTGROUPALIAS': 'alias',
'CONTACTGROUPMEMBERS': 'get_members',
'CONTACTGROUPGROUPMEMBERS': 'get_contactgroup_members'
}
def get_contacts(self):
"""Get the contacts of the group
:return: list of contacts
:rtype: list[alignak.objects.contact.Contact]
"""
return super(Contactgroup, self).get_members()
def get_contactgroup_members(self):
"""Get the groups members of the group
:return: list of contacts
:rtype: list
"""
return getattr(self, 'contactgroup_members', [])
def get_contacts_by_explosion(self, contactgroups):
# pylint: disable=access-member-before-definition
"""
Get contacts of this group
:param contactgroups: Contactgroups object, use to look for a specific one
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: list of contact of this group
:rtype: list[alignak.objects.contact.Contact]
"""
        # First we tag the contactgroup so it will not be exploded
        # again if one of its sons already called it
        self.already_exploded = True
        # Now the recursive part
        # rec_tag is set to False for every contactgroup we explode,
        # so if it is True here, there must be a loop in the
        # contactgroup calls... not GOOD!
if self.rec_tag:
logger.error("[contactgroup::%s] got a loop in contactgroup definition",
self.get_name())
if hasattr(self, 'members'):
return self.members
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
cg_mbrs = self.get_contactgroup_members()
for cg_mbr in cg_mbrs:
contactgroup = contactgroups.find_by_name(cg_mbr.strip())
if contactgroup is not None:
value = contactgroup.get_contacts_by_explosion(contactgroups)
if value is not None:
self.add_members(value)
if hasattr(self, 'members'):
return self.members
return ''
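The recursion-with-tagging scheme of get_contacts_by_explosion above can be sketched with plain dicts (the group data below is hypothetical); a shared tag set plays the role of rec_tag and turns a cyclic group definition into a reported error instead of infinite recursion.

groups = {
    'admins': {'members': ['alice'], 'group_members': ['ops']},
    'ops': {'members': ['bob'], 'group_members': ['admins']},  # loop: ops -> admins
}

def explode(name, tagged=None):
    tagged = tagged if tagged is not None else set()
    if name in tagged:
        print("loop detected in the group definition of: %s" % name)
        return set()
    tagged.add(name)
    members = set(groups[name]['members'])
    for sub_group in groups[name]['group_members']:
        members |= explode(sub_group, tagged)
    return members

print(sorted(explode('admins')))  # ['alice', 'bob'], and the loop is reported once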
|
class Contactgroup(Itemgroup):
'''Class to manage a group of contacts
A Contactgroup is used to manage a group of contacts
'''
def get_contacts(self):
'''Get the contacts of the group
:return: list of contacts
:rtype: list[alignak.objects.contact.Contact]
'''
pass
def get_contactgroup_members(self):
'''Get the groups members of the group
:return: list of contacts
:rtype: list
'''
pass
def get_contacts_by_explosion(self, contactgroups):
'''
Get contacts of this group
:param contactgroups: Contactgroups object, use to look for a specific one
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: list of contact of this group
:rtype: list[alignak.objects.contact.Contact]
'''
pass
| 4 | 4 | 18 | 2 | 8 | 8 | 3 | 0.6 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 47 | 82 | 13 | 43 | 16 | 39 | 26 | 30 | 16 | 26 | 7 | 4 | 3 | 9 |
4,106 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/contactgroup.py
|
alignak.objects.contactgroup.Contactgroups
|
class Contactgroups(Itemgroups):
"""Class to manage list of Contactgroup
Contactgroups is used to regroup all Contactgroup
"""
inner_class = Contactgroup
def add_member(self, contact_name, contactgroup_name):
"""Add a contact string to a contact member
        if the contact group does not exist, create it
:param contact_name: contact name
:type contact_name: str
:param contactgroup_name: contact group name
:type contactgroup_name: str
:return: None
"""
group = self.find_by_name(contactgroup_name)
if group:
group.add_members(contact_name)
return
group = Contactgroup({
'contactgroup_name': contactgroup_name, 'members': contact_name})
self.add_contactgroup(group)
def get_members_of_group(self, gname):
"""Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of contacts in the group
:rtype: list[alignak.objects.contact.Contact]
"""
contactgroup = self.find_by_name(gname)
if contactgroup:
return contactgroup.get_contacts()
return []
def add_contactgroup(self, contactgroup):
"""Wrapper for add_item method
Add a contactgroup to the contactgroup list
:param contactgroup: contact group to add
:type contactgroup:
:return: None
"""
self.add_item(contactgroup)
def linkify(self, contacts):
"""Create link between objects::
* contactgroups -> contacts
:param contacts: contacts to link
:type contacts: alignak.objects.contact.Contacts
:return: None
"""
self.linkify_contactgroups_contacts(contacts)
def linkify_contactgroups_contacts(self, contacts):
"""Link the contacts with contactgroups
        :param contacts: contacts object to link with
:type contacts: alignak.objects.contact.Contacts
:return: None
"""
for contactgroup in self:
mbrs = contactgroup.get_contacts()
# The new member list, in id
new_mbrs = []
for mbr in mbrs:
mbr = mbr.strip() # protect with strip at the beginning so don't care about spaces
if mbr == '': # void entry, skip this
continue
member = contacts.find_by_name(mbr)
# Maybe the contact is missing, if so, must be put in unknown_members
if member is not None:
new_mbrs.append(member.uuid)
else:
contactgroup.add_unknown_members(mbr)
# Make members uniq
new_mbrs = list(set(new_mbrs))
# We find the id, we replace the names
contactgroup.replace_members(new_mbrs)
def explode(self):
"""
Fill members with contactgroup_members
:return:None
"""
        # We do not want the same contactgroup to be exploded again and again,
        # so we tag it
for tmp_cg in list(self.items.values()):
tmp_cg.already_exploded = False
for contactgroup in list(self.items.values()):
if contactgroup.already_exploded:
continue
            # get_contacts_by_explosion is a recursive
            # function, so we tag the contactgroups to avoid looping
for tmp_cg in list(self.items.values()):
tmp_cg.rec_tag = False
contactgroup.get_contacts_by_explosion(self)
# We clean the tags
for tmp_cg in list(self.items.values()):
if hasattr(tmp_cg, 'rec_tag'):
del tmp_cg.rec_tag
del tmp_cg.already_exploded
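The name-to-uuid resolution performed by linkify_contactgroups_contacts above follows this pattern; the contact table below is a hypothetical stand-in for the Contacts container.

import uuid

contacts_by_name = {name: uuid.uuid4().hex for name in ('alice', 'bob')}

def linkify_members(member_names):
    resolved, unknown = [], []
    for name in (raw.strip() for raw in member_names):
        if not name:
            continue  # skip void entries
        if name in contacts_by_name:
            resolved.append(contacts_by_name[name])
        else:
            unknown.append(name)  # kept apart for error reporting
    return list(set(resolved)), unknown  # members are made unique

members, unknown = linkify_members([' alice ', 'bob', 'carol', ''])
print(len(members), unknown)  # 2 ['carol']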
|
class Contactgroups(Itemgroups):
'''Class to manage list of Contactgroup
Contactgroups is used to regroup all Contactgroup
'''
def add_member(self, contact_name, contactgroup_name):
'''Add a contact string to a contact member
        if the contact group does not exist, create it
:param contact_name: contact name
:type contact_name: str
:param contactgroup_name: contact group name
:type contactgroup_name: str
:return: None
'''
pass
def get_members_of_group(self, gname):
'''Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of contacts in the group
:rtype: list[alignak.objects.contact.Contact]
'''
pass
def add_contactgroup(self, contactgroup):
'''Wrapper for add_item method
Add a contactgroup to the contactgroup list
:param contactgroup: contact group to add
:type contactgroup:
:return: None
'''
pass
def linkify(self, contacts):
'''Create link between objects::
* contactgroups -> contacts
:param contacts: contacts to link
:type contacts: alignak.objects.contact.Contacts
:return: None
'''
pass
def linkify_contactgroups_contacts(self, contacts):
'''Link the contacts with contactgroups
        :param contacts: contacts object to link with
:type contacts: alignak.objects.contact.Contacts
:return: None
'''
pass
def explode(self):
'''
Fill members with contactgroup_members
:return:None
'''
pass
| 7 | 7 | 17 | 2 | 8 | 8 | 3 | 1.04 | 1 | 3 | 1 | 0 | 6 | 0 | 6 | 52 | 115 | 21 | 47 | 17 | 40 | 49 | 45 | 17 | 38 | 7 | 3 | 3 | 18 |
4,107 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/escalation.py
|
alignak.objects.escalation.Escalation
|
class Escalation(Item):
"""Escalation class is used to implement notification escalation
"""
my_type = 'escalation'
my_name_property = "%s_name" % my_type
properties = Item.properties.copy()
properties.update({
'escalation_name':
StringProp(),
'host_name':
StringProp(default=''),
        'hostgroup_name':
            StringProp(default=''),
'service_description':
StringProp(default=''),
'first_notification':
IntegerProp(),
'last_notification':
IntegerProp(),
'first_notification_time':
IntegerProp(),
'last_notification_time':
IntegerProp(),
# As a default don't use the notification_interval defined in
# the escalation, but the one defined in the object
'notification_interval':
IntegerProp(default=-1),
'escalation_period':
StringProp(default=''),
'escalation_options':
ListProp(default=['d', 'x', 'r', 'w', 'c'], split_on_comma=True),
'contacts':
ListProp(default=[], split_on_comma=True),
'contact_groups':
ListProp(default=[], split_on_comma=True),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'time_based': BoolProp(default=False),
})
special_properties = ('contacts', 'contact_groups',
'first_notification_time', 'last_notification_time')
special_properties_time_based = ('contacts', 'contact_groups',
'first_notification', 'last_notification')
def __init__(self, params, parsing=True):
# Update default options
for prop in ['escalation_options']:
if prop in params:
params[prop] = [p.replace('u', 'x') for p in params[prop]]
super(Escalation, self).__init__(params, parsing=parsing)
self.fill_default()
def is_eligible(self, timestamp, status, notif_number, in_notif_time, interval, escal_period):
# pylint: disable=too-many-return-statements
"""Check if the escalation is eligible (notification is escalated or not)
        Escalation is NOT eligible if ONE of the following conditions is fulfilled::
* escalation is not time based and notification number not in range
[first_notification;last_notification] (if last_notif == 0, it's infinity)
* escalation is time based and notification time not in range
[first_notification_time;last_notification_time] (if last_notif_time == 0, it's infinity)
        * status does not match escalation_options ('WARNING' <=> 'w' ...)
* escalation_period is not legit for this time (now usually)
:param timestamp: timestamp to check if timeperiod is valid
:type timestamp: int
:param status: item status (one of the small_states key)
:type status: str
:param notif_number: current notification number
:type notif_number: int
:param in_notif_time: current notification time
:type in_notif_time: int
:param interval: time interval length
        :type interval: int
        :param escal_period: the escalation timeperiod to check the timestamp against
        :type escal_period: alignak.objects.timeperiod.Timeperiod
:return: True if no condition has been fulfilled, otherwise False
:rtype: bool
"""
short_states = {
u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's',
u'DOWN': 'd', u'UNREACHABLE': 'x', u'OK': 'o', u'UP': 'o'
}
# If we are not time based, we check notification numbers:
if not self.time_based:
# Begin with the easy cases
if notif_number < self.first_notification:
return False
            # self.last_notification = 0 means no end
if self.last_notification and notif_number > self.last_notification:
return False
# Else we are time based, we must check for the good value
else:
# Begin with the easy cases
if in_notif_time < self.first_notification_time * interval:
return False
if self.last_notification_time and \
in_notif_time > self.last_notification_time * interval:
return False
# If our status is not good, we bail out too
if status in short_states and short_states[status] not in self.escalation_options:
return False
# Maybe the time is not in our escalation_period
if escal_period is not None and not escal_period.is_time_valid(timestamp):
return False
        # Ok, I do not see why not escalate. So it's True :)
return True
def get_next_notif_time(self, t_wished, status, creation_time, interval, escal_period):
"""Get the next notification time for the escalation
Only legit for time based escalation
:param t_wished: time we would like to send a new notification (usually now)
:type t_wished:
:param status: status of the host or service
:type status:
:param creation_time: time the notification was created
:type creation_time:
:param interval: time interval length
:type interval: int
:return: timestamp for next notification or None
:rtype: int | None
"""
        short_states = {u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
                        u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's',
                        u'DOWN': 'd', u'UNREACHABLE': 'x', u'OK': 'o', u'UP': 'o'}
# If we are not time based, we bail out!
if not self.time_based:
return None
# Check if we are valid
if status in short_states and short_states[status] not in self.escalation_options:
return None
# Look for the min of our future validity
start = self.first_notification_time * interval + creation_time
# If we are after the classic next time, we are not asking for a smaller interval
if start > t_wished:
return None
# Maybe the time we found is not a valid one....
if escal_period is not None and not escal_period.is_time_valid(start):
return None
# Ok so I ask for my start as a possibility for the next notification time
return start
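# Sketch (hedged, illustrative values): for a time-based escalation with
# first_notification_time=10, interval=60 and creation_time=1000,
# start = 10 * 60 + 1000 = 1600. Assuming 'd' is in escalation_options and
# no escalation period restriction:
#   esc.get_next_notif_time(2000, u'DOWN', 1000, 60, None)  # -> 1600
#   esc.get_next_notif_time(1500, u'DOWN', 1000, 60, None)  # -> None (start > t_wished)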
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
# If we got the _time parameters, we are time based. Otherwise, we are not :)
if hasattr(self, 'first_notification_time') or hasattr(self, 'last_notification_time'):
self.time_based = True
# Ok now we manage special cases...
if not hasattr(self, 'contacts') and not hasattr(self, 'contact_groups'):
self.add_error('%s: I do not have contacts nor contact_groups' % (self.get_name()))
state = False
# If time_based or not, we do not check all properties
if self.time_based:
if not hasattr(self, 'first_notification_time'):
self.add_error('%s: I do not have first_notification_time' % (self.get_name()))
state = False
if not hasattr(self, 'last_notification_time'):
self.add_error('%s: I do not have last_notification_time' % (self.get_name()))
state = False
else: # we check classical properties
if not hasattr(self, 'first_notification'):
self.add_error('%s: I do not have first_notification' % (self.get_name()))
state = False
if not hasattr(self, 'last_notification'):
self.add_error('%s: I do not have last_notification' % (self.get_name()))
state = False
# Change the special_properties definition according to time_based ...
save_special_properties = self.special_properties
if self.time_based:
self.special_properties = self.special_properties_time_based
state_parent = super(Escalation, self).is_correct()
if self.time_based:
self.special_properties = save_special_properties
return state_parent and state
|
class Escalation(Item):
'''Escalation class is used to implement notification escalation
'''
def __init__(self, params, parsing=True):
pass
def is_eligible(self, timestamp, status, notif_number, in_notif_time, interval, escal_period):
'''Check if the escalation is eligible (notification is escalated or not)
Escalation is NOT eligible if ONE of the following conditions is fulfilled::
* escalation is not time based and notification number not in range
[first_notification;last_notification] (if last_notif == 0, it's infinity)
* escalation is time based and notification time not in range
[first_notification_time;last_notification_time] (if last_notif_time == 0, it's infinity)
* status does not match escalation_options ('WARNING' <=> 'w' ...)
* escalation_period is not legit for this time (now usually)
:param timestamp: timestamp to check if timeperiod is valid
:type timestamp: int
:param status: item status (one of the small_states key)
:type status: str
:param notif_number: current notification number
:type notif_number: int
:param in_notif_time: current notification time
:type in_notif_time: int
:param interval: time interval length
:type interval: int
:return: True if no condition has been fulfilled, otherwise False
:rtype: bool
'''
pass
def get_next_notif_time(self, t_wished, status, creation_time, interval, escal_period):
'''Get the next notification time for the escalation
Only legit for time based escalation
:param t_wished: time we would like to send a new notification (usually now)
:type t_wished:
:param status: status of the host or service
:type status:
:param creation_time: time the notification was created
:type creation_time:
:param interval: time interval length
:type interval: int
:return: timestamp for next notification or None
:rtype: int | None
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 5 | 4 | 40 | 7 | 18 | 16 | 7 | 0.6 | 1 | 1 | 0 | 0 | 4 | 1 | 4 | 38 | 210 | 35 | 110 | 19 | 105 | 66 | 70 | 19 | 65 | 10 | 3 | 2 | 26 |
4,108 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/escalation.py
|
alignak.objects.escalation.Escalations
|
class Escalations(Items):
"""Escalations manage a list of Escalation objects, used for parsing configuration
"""
name_property = "escalation_name"
inner_class = Escalation
def linkify(self, timeperiods, contacts, services, hosts):
"""Create link between objects::
* escalation -> host
* escalation -> service
* escalation -> timeperiods
* escalation -> contact
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param contacts: contacts to link
:type contacts: alignak.objects.contact.Contacts
:param services: services to link
:type services: alignak.objects.service.Services
:param hosts: hosts to link
:type hosts: alignak.objects.host.Hosts
:return: None
"""
self.linkify_with_timeperiods(timeperiods, 'escalation_period')
self.linkify_with_contacts(contacts)
self.linkify_es_by_s(services)
self.linkify_es_by_h(hosts)
def add_escalation(self, escalation):
"""Wrapper for add_item method
:param escalation: escalation to add to item dict
:type escalation: alignak.objects.escalation.Escalation
:return: None
"""
self.add_item(escalation)
def linkify_es_by_s(self, services):
"""Add each escalation object into service.escalation attribute
:param services: service list, used to look for a specific service
:type services: alignak.objects.service.Services
:return: None
"""
for escalation in self:
# If no host, no hope of having a service
if not hasattr(escalation, 'host_name'):
continue
es_hname, sdesc = escalation.host_name, escalation.service_description
if not es_hname.strip() or not sdesc.strip():
continue
for hname in strip_and_uniq(es_hname.split(',')):
if sdesc.strip() == '*':
slist = services.find_srvs_by_hostname(hname)
if slist is not None:
slist = [services[serv] for serv in slist]
for serv in slist:
serv.escalations.append(escalation.uuid)
else:
for sname in strip_and_uniq(sdesc.split(',')):
serv = services.find_srv_by_name_and_hostname(hname, sname)
if serv is not None:
serv.escalations.append(escalation.uuid)
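# Configuration sketch (hedged, illustrative definition): an escalation with
#   host_name            srv1,srv2
#   service_description  *
# is linked to every service of srv1 and srv2, while
#   service_description  Http,Ssh
# would link it only to those two services on each listed host.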
def linkify_es_by_h(self, hosts):
"""Add each escalation object into host.escalation attribute
:param hosts: host list, used to look for a specific host
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for escal in self:
# Keep only host escalations: skip when there is no host_name, or when
# a service_description is set (those are handled by linkify_es_by_s)
if (not hasattr(escal, 'host_name') or escal.host_name.strip() == '' or
(hasattr(escal, 'service_description')
and escal.service_description.strip() != '')):
continue
for hname in strip_and_uniq(escal.host_name.split(',')):
host = hosts.find_by_name(hname)
if host is not None:
host.escalations.append(escal.uuid)
def explode(self, hosts, hostgroups, contactgroups):
"""Loop over all escalation and explode hostsgroups in host
and contactgroups in contacts
Call Item.explode_host_groups_into_hosts and Item.explode_contact_groups_into_contacts
:param hosts: host list to explode
:type hosts: alignak.objects.host.Hosts
:param hostgroups: hostgroup list to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: contactgroup list to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
for i in self:
# items::explode_host_groups_into_hosts
# take all hosts from our hostgroup_name into our host_name property
self.explode_host_groups_into_hosts(i, hosts, hostgroups)
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(i, contactgroups)
|
class Escalations(Items):
'''Escalations manage a list of Escalation objects, used for parsing configuration
'''
def linkify(self, timeperiods, contacts, services, hosts):
'''Create link between objects::
* escalation -> host
* escalation -> service
* escalation -> timeperiods
* escalation -> contact
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param contacts: contacts to link
:type contacts: alignak.objects.contact.Contacts
:param services: services to link
:type services: alignak.objects.service.Services
:param hosts: hosts to link
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
def add_escalation(self, escalation):
'''Wrapper for add_item method
:param escalation: escalation to add to item dict
:type escalation: alignak.objects.escalation.Escalation
:return: None
'''
pass
def linkify_es_by_s(self, services):
'''Add each escalation object into service.escalation attribute
:param services: service list, used to look for a specific service
:type services: alignak.objects.service.Services
:return: None
'''
pass
def linkify_es_by_h(self, hosts):
'''Add each escalation object into host.escalation attribute
:param hosts: host list, used to look for a specific host
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
def explode(self, hosts, hostgroups, contactgroups):
'''Loop over all escalations and explode hostgroups into hosts
and contactgroups into contacts
Call Item.explode_host_groups_into_hosts and Item.explode_contact_groups_into_contacts
:param hosts: host list to explode
:type hosts: alignak.objects.host.Hosts
:param hostgroups: hostgroup list to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: contactgroup list to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
'''
pass
| 6 | 6 | 20 | 2 | 8 | 10 | 4 | 1.16 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 50 | 109 | 16 | 43 | 17 | 37 | 50 | 40 | 17 | 34 | 10 | 2 | 5 | 19 |
4,109 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/genericextinfo.py
|
alignak.objects.genericextinfo.GenericExtInfo
|
class GenericExtInfo(Item):
"""GenericExtInfo class is made to handle some parameters of SchedulingItem::
* notes
* notes_url
* icon_image
* icon_image_alt
"""
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
def get_name(self, index=False):
"""Accessor to host_name attribute or name if first not defined
:return: host name, use to search the host to merge
:rtype: str
"""
return getattr(self, 'host_name', getattr(self, 'name', 'UNKNOWN'))
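# Lookup-order sketch: the chained getattr calls resolve in this order:
#   host_name set             -> host_name
#   only name set             -> name
#   neither attribute defined -> 'UNKNOWN'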
|
class GenericExtInfo(Item):
'''GenericExtInfo class is made to handle some parameters of SchedulingItem::
* notes
* notes_url
* icon_image
* icon_image_alt
'''
def get_name(self, index=False):
'''Accessor to the host_name attribute, or to name if the former is not defined
:return: host name, use to search the host to merge
:rtype: str
'''
pass
| 2 | 2 | 7 | 1 | 2 | 4 | 1 | 6.67 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 35 | 28 | 5 | 3 | 2 | 1 | 20 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
4,110 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/host.py
|
alignak.objects.host.Host
|
class Host(SchedulingItem): # pylint: disable=too-many-public-methods
"""Host class implements monitoring concepts for host.
For example it defines parents, check_interval, check_command etc.
"""
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
ok_up = u'UP'
my_type = 'host'
my_name_property = "%s_name" % my_type
# if Host(or more generally Item) instances were created with all properties
# having a default value set in the instance then we wouldn't need this:
service_includes = service_excludes = []
# though, as these 2 attributes are to be relatively low used it's not
# that bad to have the default be defined only once here at the class level.
properties = SchedulingItem.properties.copy()
properties.update({
'host_name':
StringProp(fill_brok=[FULL_STATUS, CHECK_RESULT, 'next_schedule']),
'alias':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'address':
StringProp(fill_brok=[FULL_STATUS]),
'address6':
StringProp(fill_brok=[FULL_STATUS], default=''),
'parents':
ListProp(default=[],
fill_brok=[FULL_STATUS], merging='join', split_on_comma=True),
'hostgroups':
ListProp(default=[],
fill_brok=[FULL_STATUS], merging='join', split_on_comma=True),
'check_command':
StringProp(default='', fill_brok=[FULL_STATUS]),
'flap_detection_options':
ListProp(default=['o', 'd', 'x'], fill_brok=[FULL_STATUS],
merging='join', split_on_comma=True),
'notification_options':
ListProp(default=['d', 'x', 'r', 'f'], fill_brok=[FULL_STATUS],
merging='join', split_on_comma=True),
'vrml_image':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'statusmap_image':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'freshness_state':
CharProp(default='x', fill_brok=[FULL_STATUS]),
# No slots for these 2 because a property name beginning with a digit
# is not a valid identifier for __slots__
'2d_coords':
StringProp(default=u'', fill_brok=[FULL_STATUS], no_slots=True),
'3d_coords':
StringProp(default=u'', fill_brok=[FULL_STATUS], no_slots=True),
# New to alignak
# 'fill_brok' is ok because in scheduler it's already
# a string from conf_send_preparation
'service_overrides':
ListProp(default=[], merging='duplicate', split_on_comma=False),
'service_excludes':
ListProp(default=[], merging='duplicate', split_on_comma=True),
'service_includes':
ListProp(default=[], merging='duplicate', split_on_comma=True),
'snapshot_criteria':
ListProp(default=['d', 'x'], fill_brok=[FULL_STATUS], merging='join'),
# Realm stuff
'realm':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
})
# properties set only for running purpose
# retention: save/load this property from retention
running_properties = SchedulingItem.running_properties.copy()
running_properties.update({
'state':
StringProp(default=u'UP', fill_brok=[FULL_STATUS, CHECK_RESULT],
retention=True),
'last_time_up':
IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True),
'last_time_down':
IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True),
'last_time_unreachable':
IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True),
# Our services
'services':
StringProp(default=[]),
# Realm stuff
'realm_name':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'got_default_realm':
BoolProp(default=False),
'state_before_hard_unknown_reach_phase':
StringProp(default=u'UP', retention=True),
})
# Hosts macros and prop that give the information
# the prop can be callable or not
macros = SchedulingItem.macros.copy()
macros.update({
'HOSTNAME': 'host_name',
'HOSTDISPLAYNAME': 'display_name',
'HOSTALIAS': 'alias',
'HOSTADDRESS': 'address',
'HOSTSTATE': 'state',
'HOSTSTATEID': 'state_id',
'LASTHOSTSTATE': 'last_state',
'LASTHOSTSTATEID': 'last_state_id',
'HOSTSTATETYPE': 'state_type',
'HOSTATTEMPT': 'attempt',
'MAXHOSTATTEMPTS': 'max_check_attempts',
'HOSTEVENTID': 'current_event_id',
'LASTHOSTEVENTID': 'last_event_id',
'HOSTPROBLEMID': 'current_problem_id',
'LASTHOSTPROBLEMID': 'last_problem_id',
'HOSTLATENCY': 'latency',
'HOSTEXECUTIONTIME': 'execution_time',
'HOSTDURATION': 'get_duration',
'HOSTDURATIONSEC': 'get_duration_sec',
'HOSTDOWNTIME': 'get_downtime',
'HOSTPERCENTCHANGE': 'percent_state_change',
'HOSTGROUPNAME': ('get_groupname', ['hostgroups']),
'HOSTGROUPNAMES': ('get_groupnames', ['hostgroups']),
'HOSTGROUPALIAS': ('get_groupalias', ['hostgroups']),
'HOSTGROUPALIASES': ('get_groupaliases', ['hostgroups']),
'LASTHOSTCHECK': 'last_chk',
'LASTHOSTSTATECHANGE': 'last_state_change',
'LASTHOSTUP': 'last_time_up',
'LASTHOSTDOWN': 'last_time_down',
'LASTHOSTUNREACHABLE': 'last_time_unreachable',
'HOSTOUTPUT': 'output',
'LONGHOSTOUTPUT': 'long_output',
'HOSTPERFDATA': 'perf_data',
'LASTHOSTPERFDATA': 'last_perf_data',
'HOSTCHECKCOMMAND': 'get_check_command',
'HOSTSNAPSHOTCOMMAND': 'get_snapshot_command',
'HOSTACKAUTHOR': 'get_ack_author_name',
'HOSTACKAUTHORNAME': 'get_ack_author_name',
'HOSTACKAUTHORALIAS': 'get_ack_author_name',
'HOSTACKCOMMENT': 'get_ack_comment',
'HOSTACTIONURL': 'action_url',
'HOSTNOTESURL': 'notes_url',
'HOSTNOTES': 'notes',
'HOSTREALM': 'realm_name',
'TOTALHOSTSERVICES': 'get_total_services',
'TOTALHOSTSERVICESOK': ('get_total_services_ok', ['services']),
'TOTALHOSTSERVICESWARNING': ('get_total_services_warning', ['services']),
'TOTALHOSTSERVICESCRITICAL': ('get_total_services_critical', ['services']),
'TOTALHOSTSERVICESUNKNOWN': ('get_total_services_unknown', ['services']),
'TOTALHOSTSERVICESUNREACHABLE': ('get_total_services_unreachable', ['services']),
'HOSTBUSINESSIMPACT': 'business_impact',
})
# Todo: really useless ... should be removed, but let's discuss!
# Currently, this breaks the macro resolver because the corresponding properties do not exist!
# Manage ADDRESSX macros by adding them dynamically
# for i in range(32):
# macros['HOSTADDRESS%d' % i] = 'address%d' % i
# This tab is used to transform old parameters name into new ones
# so from Nagios2 format, to Nagios3 ones.
# Or Alignak deprecated names like criticity
old_properties = SchedulingItem.old_properties.copy()
old_properties.update({
'hostgroup': 'hostgroups',
})
def __init__(self, params, parsing=True):
# Must convert the unreachable properties to manage the new 'x' option value
self.convert_conf_for_unreachable(params=params)
super(Host, self).__init__(params, parsing=parsing)
def __str__(self): # pragma: no cover
return '<Host%s %s, uuid=%s, %s (%s), realm: %s, use: %s />' \
% (' template' if self.is_a_template() else '', self.get_full_name(),
getattr(self, 'uuid', 'n/a'), getattr(self, 'state', 'n/a'),
getattr(self, 'state_type', 'n/a'), getattr(self, 'realm', 'Default'),
getattr(self, 'tags', None))
__repr__ = __str__
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
@staticmethod
def convert_conf_for_unreachable(params):
"""
The 'u' state for UNREACHABLE has been rewritten as 'x' in:
* flap_detection_options
* notification_options
* snapshot_criteria
So convert the values from the config file to keep compatibility with Nagios
:param params: parameters of the host before being put in properties
:type params: dict
:return: None
"""
if params is None:
return
for prop in ['flap_detection_options', 'notification_options',
'snapshot_criteria', 'stalking_options']:
if prop in params:
params[prop] = [p.replace('u', 'x') for p in params[prop]]
if 'initial_state' in params and \
(params['initial_state'] == 'u' or params['initial_state'] == ['u']):
params['initial_state'] = 'x'
if 'freshness_state' in params and \
(params['freshness_state'] == 'u' or params['freshness_state'] == ['u']):
params['freshness_state'] = 'x'
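# Conversion sketch (hedged, illustrative params):
#   params = {'notification_options': ['d', 'u', 'r'], 'initial_state': 'u'}
#   Host.convert_conf_for_unreachable(params)
#   params  ->  {'notification_options': ['d', 'x', 'r'], 'initial_state': 'x'}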
def fill_predictive_missing_parameters(self):
"""Fill address with host_name if not already set
and define state with initial_state
:return: None
"""
if hasattr(self, 'host_name') and not hasattr(self, 'address'):
self.address = self.host_name
if hasattr(self, 'host_name') and not hasattr(self, 'alias'):
self.alias = self.host_name
if self.initial_state == 'd':
self.state = 'DOWN'
elif self.initial_state == 'x':
self.state = 'UNREACHABLE'
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
cls = self.__class__
if getattr(self, 'host_name', ''):
for char in cls.illegal_object_name_chars:
if char in self.host_name:
self.add_error("host_name contains an illegal character: %s" % char)
state = False
# Fred: do not alert about a missing check_command for a host... this is
# because 1/ it is very verbose if hosts are not checked and 2/ it is the
# Nagios default behavior
# if not self.check_command:
# self.add_warning("[%s::%s] has no defined check command"
# % (self.my_type, self.get_name()))
if getattr(self, 'notifications_enabled', None) and not getattr(self, 'contacts', None):
self.add_warning("notifications are enabled but no contacts nor "
"contact_groups property is defined for this host")
return super(Host, self).is_correct() and state
def get_services(self):
"""Get all services for this host
:return: list of services
:rtype: list
"""
return self.services
def get_groupname(self, hostgroups):
"""Get name of the first host's hostgroup (alphabetic sort)
:return: host group name
:rtype: str
TODO: Clean this. It returns the first hostgroup (alphabetic sort)
"""
group_names = self.get_groupnames(hostgroups).split(',')
return group_names[0]
def get_groupalias(self, hostgroups):
"""Get alias of the first host's hostgroup (alphabetic sort on group alias)
:return: host group alias
:rtype: str
TODO: Clean this. It returns the first hostgroup alias (alphabetic sort)
"""
group_aliases = self.get_groupaliases(hostgroups).split(',')
return group_aliases[0]
def get_groupnames(self, hostgroups):
"""Get names of the host's hostgroups
:return: comma separated names of hostgroups alphabetically sorted
:rtype: str
"""
group_names = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_names.append(hostgroup.get_name())
return ','.join(sorted(group_names))
def get_groupaliases(self, hostgroups):
"""Get aliases of the host's hostgroups
:return: comma separated aliases of hostgroups alphabetically sorted
:rtype: str
"""
group_aliases = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_aliases.append(hostgroup.alias)
return ','.join(sorted(group_aliases))
def get_hostgroups(self):
"""Accessor to hostgroups attribute
:return: hostgroup list object of host
:rtype: list
"""
return self.hostgroups
def add_service_link(self, service):
"""Add a service to the service list of this host
:param service: the service to add
:type service: alignak.objects.service.Service
:return: None
"""
self.services.append(service)
def is_excluded_for(self, service):
"""Check whether this host should have the passed service be "excluded" or "not included".
A host can define service_includes and/or service_excludes directives to
either white-list-only or black-list some services from itself.
:param service:
:type service: alignak.objects.service.Service
:return: True if is excluded, otherwise False
:rtype: bool
"""
return self.is_excluded_for_sdesc(
getattr(service, 'service_description', None), service.is_a_template()
)
def is_excluded_for_sdesc(self, sdesc, is_tpl=False):
""" Check whether this host should have the passed service *description*
be "excluded" or "not included".
:param sdesc: service description
:type sdesc:
:param is_tpl: True if service is template, otherwise False
:type is_tpl: bool
:return: True if service description excluded, otherwise False
:rtype: bool
"""
if not is_tpl and self.service_includes:
return sdesc not in self.service_includes
if self.service_excludes:
return sdesc in self.service_excludes
return False
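# White/black-list sketch (hedged, illustrative values):
#   service_includes=['Http', 'Ssh'], service_excludes=[]
#     is_excluded_for_sdesc('Http')  -> False (white-listed)
#     is_excluded_for_sdesc('Ntp')   -> True  (not in the white list)
#   service_includes=[], service_excludes=['Ntp']
#     is_excluded_for_sdesc('Ntp')   -> True  (black-listed)
# With is_tpl=True the include list is bypassed and only service_excludes
# is checked.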
#####
# _
# (_)
# _ __ _ _ _ __ _ __ _ _ __ __ _
# | '__| | | | '_ \| '_ \| | '_ \ / _` |
# | | | |_| | | | | | | | | | | | (_| |
# |_| \__,_|_| |_|_| |_|_|_| |_|\__, |
# __/ |
# |___/
####
def set_state_from_exit_status(self, status, notif_period, hosts, services):
"""Set the state in UP, DOWN, or UNREACHABLE according to the status of a check result.
:param status: integer among 0, 2, 3 and 4 (1 is handled by the caller)
:type status: int
:return: None
"""
now = time.time()
# We should store the correct last state:
# if the state was not just changed by a problem/impact, we can take
# the current state. But if it was, the real old state is
# self.state_before_impact (the TRUE state in fact)
# And only if we enable the impact state change
cls = self.__class__
if (cls.enable_problem_impacts_states_change and
self.is_impact and not self.state_changed_since_impact):
self.last_state = self.state_before_impact
else:
self.last_state = self.state
# There is no 1 case because it should have been managed by the caller for a host
# like the schedulingitem::consume method.
if status == 0:
self.state = u'UP'
self.state_id = 0
self.last_time_up = int(self.last_state_update)
# self.last_time_up = self.last_state_update
state_code = 'o'  # 'o' is the UP code used in flap_detection_options
elif status in (2, 3):
self.state = u'DOWN'
self.state_id = 1
self.last_time_down = int(self.last_state_update)
# self.last_time_down = self.last_state_update
state_code = 'd'
elif status == 4:
self.state = u'UNREACHABLE'
self.state_id = 4
self.last_time_unreachable = int(self.last_state_update)
# self.last_time_unreachable = self.last_state_update
state_code = 'x'
else:
self.state = u'DOWN' # exit code UNDETERMINED
self.state_id = 1
# self.last_time_down = int(self.last_state_update)
self.last_time_down = self.last_state_update
state_code = 'd'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
# Now we add a value, we update the is_flapping prop
self.update_flapping(notif_period, hosts, services)
if self.state != self.last_state and \
not (self.state == "DOWN" and self.last_state == "UNREACHABLE"):
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
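# Mapping sketch: exit status to host state as implemented above:
#   0        -> UP          (state_id 0)
#   2 or 3   -> DOWN        (state_id 1)
#   4        -> UNREACHABLE (state_id 4)
#   other    -> DOWN        (undetermined exit code)
# Exit status 1 never reaches this method for a host: the caller
# (the scheduling item consume logic) is expected to have remapped it.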
def is_state(self, status):
"""Return if status match the current host status
:param status: status to compare ( "o", "d", "x"). Usually comes from config files
:type status: str
:return: True if status <=> self.status, otherwise False
:rtype: bool
"""
if status == self.state:
return True
# Now the short status codes
if status == 'o' and self.state == u'UP':
return True
if status == 'd' and self.state == u'DOWN':
return True
if status in ['u', 'x'] and self.state == u'UNREACHABLE':
return True
return False
def last_time_non_ok_or_up(self):
"""Get the last time the host was in a non-OK state
:return: self.last_time_down if self.last_time_down > self.last_time_up, 0 otherwise
:rtype: int
"""
non_ok_times = [x for x in [self.last_time_down]
if x > self.last_time_up]
if not non_ok_times:
last_time_non_ok = 0 # todo: program_start would be better?
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok
def raise_check_result(self):
"""Raise ACTIVE CHECK RESULT entry
Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."
:return: None
"""
if not self.__class__.log_active_checks:
return
log_level = 'info'
if self.state == 'DOWN':
log_level = 'error'
elif self.state == 'UNREACHABLE':
log_level = 'warning'
brok = make_monitoring_log(
log_level, 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (self.get_name(), self.state,
self.attempt, self.output)
)
self.broks.append(brok)
def raise_alert_log_entry(self):
"""Raise HOST ALERT entry
Format is : "HOST ALERT: *get_name()*;*state*;*state_type*;*attempt*;*output*"
Example : "HOST ALERT: server;DOWN;HARD;1;I don't know what to say..."
:return: None
"""
if self.__class__.log_alerts:
log_level = 'info'
if self.state == 'DOWN':
log_level = 'error'
if self.state == 'UNREACHABLE':
log_level = 'warning'
brok = make_monitoring_log(
log_level, 'HOST ALERT: %s;%s;%s;%d;%s' % (
self.get_name(), self.state, self.state_type, self.attempt, self.output
)
)
self.broks.append(brok)
if 'ALIGNAK_LOG_ALERTS' in os.environ:
if os.environ['ALIGNAK_LOG_ALERTS'] == 'WARNING':
logger.warning('HOST ALERT: %s;%s;%s;%d;%s', self.get_name(), self.state,
self.state_type, self.attempt, self.output)
else:
logger.info('HOST ALERT: %s;%s;%s;%d;%s', self.get_name(), self.state,
self.state_type, self.attempt, self.output)
def raise_initial_state(self):
"""Raise CURRENT HOST ALERT entry (info level)
Format is : "CURRENT HOST STATE: *get_name()*;*state*;*state_type*;*attempt*;*output*"
Example : "CURRENT HOST STATE: server;DOWN;HARD;1;I don't know what to say..."
:return: None
"""
if not self.__class__.log_initial_states:
return
log_level = 'info'
if self.state == 'DOWN':
log_level = 'error'
if self.state == 'UNREACHABLE':
log_level = 'warning'
brok = make_monitoring_log(
log_level, 'CURRENT HOST STATE: %s;%s;%s;%d;%s' % (
self.get_name(), self.state, self.state_type, self.attempt, self.output
)
)
self.broks.append(brok)
def raise_notification_log_entry(self, notif, contact, host_ref=None):
"""Raise HOST NOTIFICATION entry (critical level)
Format is : "HOST NOTIFICATION: *contact.get_name()*;*self.get_name()*;*state*;
*command.get_name()*;*output*"
Example : "HOST NOTIFICATION: superadmin;server;UP;notify-by-rss;no output"
:param notif: notification object created by host alert
:type notif: alignak.objects.notification.Notification
:return: None
"""
if self.__class__.log_notifications:
log_level = 'info'
command = notif.command_call
if notif.type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
u'CUSTOM', u'ACKNOWLEDGEMENT',
u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
state = '%s (%s)' % (notif.type, self.state)
else:
state = self.state
if self.state == 'UNREACHABLE':
log_level = 'warning'
if self.state == 'DOWN':
log_level = 'error'
brok = make_monitoring_log(
log_level, "HOST NOTIFICATION: %s;%s;%s;%s;%s;%s" % (
contact.get_name(), self.get_name(), state,
notif.notif_nb, command.get_name(), self.output
)
)
self.broks.append(brok)
if 'ALIGNAK_LOG_NOTIFICATIONS' in os.environ:
if os.environ['ALIGNAK_LOG_NOTIFICATIONS'] == 'WARNING':
logger.warning("HOST NOTIFICATION: %s;%s;%s;%s;%s;%s",
contact.get_name(), self.get_name(), state,
notif.notif_nb, command.get_name(), self.output)
else:
logger.info("HOST NOTIFICATION: %s;%s;%s;%s;%s;%s",
contact.get_name(), self.get_name(), state,
notif.notif_nb, command.get_name(), self.output)
def raise_event_handler_log_entry(self, command):
"""Raise HOST EVENT HANDLER entry (critical level)
Format is : "HOST EVENT HANDLER: *self.get_name()*;*state*;*state_type*;*attempt*;
*command.get_name()*"
Example : "HOST EVENT HANDLER: server;UP;HARD;1;notify-by-rss"
:param command: Handler launched
:type command: alignak.objects.command.Command
:return: None
"""
if not self.__class__.log_event_handlers:
return
log_level = 'info'
if self.state == 'UNREACHABLE':
log_level = 'warning'
if self.state == 'DOWN':
log_level = 'error'
brok = make_monitoring_log(
log_level, "HOST EVENT HANDLER: %s;%s;%s;%s;%s" % (
self.get_name(), self.state, self.state_type, self.attempt, command.get_name()
)
)
self.broks.append(brok)
def raise_snapshot_log_entry(self, command):
"""Raise HOST SNAPSHOT entry (critical level)
Format is : "HOST SNAPSHOT: *self.get_name()*;*state*;*state_type*;*attempt*;
*command.get_name()*"
Example : "HOST SNAPSHOT: server;UP;HARD;1;notify-by-rss"
:param command: Snapshot command launched
:type command: alignak.objects.command.Command
:return: None
"""
if not self.__class__.log_snapshots:
return
log_level = 'info'
if self.state == 'UNREACHABLE':
log_level = 'warning'
if self.state == 'DOWN':
log_level = 'error'
brok = make_monitoring_log(
log_level, "HOST SNAPSHOT: %s;%s;%s;%s;%s" % (
self.get_name(), self.state, self.state_type, self.attempt, command.get_name()
)
)
self.broks.append(brok)
def raise_flapping_start_log_entry(self, change_ratio, threshold):
"""Raise HOST FLAPPING ALERT START entry (critical level)
Format is : "HOST FLAPPING ALERT: *self.get_name()*;STARTED;
Host appears to have started
flapping (*change_ratio*% change >= *threshold*% threshold)"
Example : "HOST FLAPPING ALERT: server;STARTED;
Host appears to have started
flapping (50.6% change >= 50.0% threshold)"
:param change_ratio: percent of changing state
:type change_ratio: float
:param threshold: threshold (percent) to trigger this log entry
:type threshold: float
:return: None
"""
if not self.__class__.log_flappings:
return
brok = make_monitoring_log(
'info',
"HOST FLAPPING ALERT: %s;STARTED; Host appears to have started "
"flapping (%.1f%% change >= %.1f%% threshold)"
% (self.get_name(), change_ratio, threshold)
)
self.broks.append(brok)
def raise_flapping_stop_log_entry(self, change_ratio, threshold):
"""Raise HOST FLAPPING ALERT STOPPED entry (critical level)
Format is : "HOST FLAPPING ALERT: *self.get_name()*;STOPPED;
Host appears to have stopped
flapping (*change_ratio*% change < *threshold*% threshold)"
Example : "HOST FLAPPING ALERT: server;STOPPED;
Host appears to have stopped
flapping (23.0% change < 25.0% threshold)"
:param change_ratio: percent of changing state
:type change_ratio: float
:param threshold: threshold (percent) to trigger this log entry
:type threshold: float
:return: None
"""
if not self.__class__.log_flappings:
return
brok = make_monitoring_log(
'info',
"HOST FLAPPING ALERT: %s;STOPPED; Host appears to have stopped flapping "
"(%.1f%% change < %.1f%% threshold)"
% (self.get_name(), change_ratio, threshold)
)
self.broks.append(brok)
def raise_no_next_check_log_entry(self):
"""Raise no scheduled check entry (warning level)
Format is : "I cannot schedule the check for the host 'get_name()*'
because there is not future valid time"
Example : "I cannot schedule the check for the host 'Server'
because there is not future valid time"
:return: None
"""
logger.warning("I cannot schedule the check for the host '%s' "
"because there is not future valid time",
self.get_name())
def raise_acknowledge_log_entry(self):
"""Raise HOST ACKNOWLEDGE ALERT entry (critical level)
:return: None
"""
if not self.__class__.log_acknowledgements:
return
brok = make_monitoring_log(
'info', "HOST ACKNOWLEDGE ALERT: %s;STARTED; "
"Host problem has been acknowledged" % self.get_name()
)
self.broks.append(brok)
def raise_unacknowledge_log_entry(self):
"""Raise HOST ACKNOWLEDGE STOPPED entry (critical level)
:return: None
"""
if not self.__class__.log_acknowledgements:
return
brok = make_monitoring_log(
'info', "HOST ACKNOWLEDGE ALERT: %s;EXPIRED; "
"Host problem acknowledge expired" % self.get_name()
)
self.broks.append(brok)
def raise_enter_downtime_log_entry(self):
"""Raise HOST DOWNTIME ALERT entry (critical level)
Format is : "HOST DOWNTIME ALERT: *get_name()*;STARTED;
Host has entered a period of scheduled downtime"
Example : "HOST DOWNTIME ALERT: test_host_0;STARTED;
Host has entered a period of scheduled downtime"
:return: None
"""
if not self.__class__.log_downtimes:
return
brok = make_monitoring_log(
'info', "HOST DOWNTIME ALERT: %s;STARTED; "
"Host has entered a period of scheduled downtime" % (self.get_name())
)
self.broks.append(brok)
def raise_exit_downtime_log_entry(self):
"""Raise HOST DOWNTIME ALERT entry (critical level)
Format is : "HOST DOWNTIME ALERT: *get_name()*;STOPPED;
Host has entered a period of scheduled downtime"
Example : "HOST DOWNTIME ALERT: test_host_0;STOPPED;
Host has entered a period of scheduled downtime"
:return: None
"""
if not self.__class__.log_downtimes:
return
brok = make_monitoring_log(
'info', "HOST DOWNTIME ALERT: %s;STOPPED; "
"Host has exited from a period of scheduled downtime" % (self.get_name())
)
self.broks.append(brok)
def raise_cancel_downtime_log_entry(self):
"""Raise HOST DOWNTIME ALERT entry (critical level)
Format is : "HOST DOWNTIME ALERT: *get_name()*;CANCELLED;
Host has entered a period of scheduled downtime"
Example : "HOST DOWNTIME ALERT: test_host_0;CANCELLED;
Host has entered a period of scheduled downtime"
:return: None
"""
if not self.__class__.log_downtimes:
return
brok = make_monitoring_log(
'info', "HOST DOWNTIME ALERT: %s;CANCELLED; "
"Scheduled downtime for host has been cancelled." % (self.get_name())
)
self.broks.append(brok)
def manage_stalking(self, check):
"""Check if the host need stalking or not (immediate recheck)
If one stalking_options matches the exit_status ('o' <=> 0 ...) then stalk is needed
Raise a log entry (info level) if stalk is needed
:param check: finished check (check.status == 'waitconsume')
:type check: alignak.check.Check
:return: None
"""
need_stalk = False
if check.status == u'waitconsume':
if check.exit_status == 0 and 'o' in self.stalking_options:
need_stalk = True
elif check.exit_status in (1, 2) and 'd' in self.stalking_options:
need_stalk = True
if check.output != self.output:
need_stalk = False
if need_stalk:
logger.info("Stalking %s: %s", self.get_name(), self.output)
def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact):
"""Check if the notification is blocked by this contact.
:param notif: notification created earlier
:type notif: alignak.notification.Notification
:param contact: contact we want to notify
:type notif: alignak.objects.contact.Contact
:return: True if the notification is blocked, False otherwise
:rtype: bool
"""
return not contact.want_host_notification(notifways, timeperiods,
self.last_chk, self.state, notif.type,
self.business_impact, notif.command_call)
def get_duration_sec(self):
"""Get duration in seconds. (cast it before returning)
:return: duration in seconds
:rtype: str
TODO: Move to util or SchedulingItem class
"""
return str(int(self.duration_sec))
def get_duration(self):
"""Get duration formatted
Format is : "HHh MMm SSs"
Example : "10h 20m 40s"
:return: Formatted duration
:rtype: str
"""
mins, secs = divmod(self.duration_sec, 60)
hours, mins = divmod(mins, 60)
return "%02dh %02dm %02ds" % (hours, mins, secs)
def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished):
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements, too-many-boolean-expressions
"""Check if a notification is blocked by the host.
Conditions are ONE of the following::
* enable_notification is False (global)
* not in a notification_period
* notifications_enabled is False (local)
* notification_options is 'n' or does not match the state ('DOWN' <=> 'd' ...)
(including flapping and downtimes)
* state goes ok and type is 'ACKNOWLEDGEMENT' (no sense)
* scheduled_downtime_depth > 0 and flapping (host is in downtime)
* scheduled_downtime_depth > 1 and not downtime end (deep downtime)
* scheduled_downtime_depth > 0 and problem or recovery (host is in downtime)
* SOFT state of a problem (we raise notifications only on HARD state)
* ACK notification when already ACK (don't raise again ACK)
* not flapping notification in a flapping state
* business rule smart notifications is enabled and all its children have been acknowledged
or are under downtime
:param n_type: notification type
:type n_type:
:param t_wished: the time we would like to notify the host (mostly now)
:type t_wished: float
:return: True if ONE of the above condition was met, otherwise False
:rtype: bool
TODO: Refactor this, a lot of code duplication with Service.is_blocking_notifications
"""
logger.debug("Checking if a host %s (%s) notification is blocked...",
self.get_name(), self.state)
if t_wished is None:
t_wished = time.time()
# TODO
# forced notification -> false
# custom notification -> false
# Block if notifications are program-wide disabled
# Block if notifications are disabled for this host
# Block if the current status is in the notification_options d,u,r,f,s
if not self.enable_notifications or \
not self.notifications_enabled or \
'n' in self.notification_options:
logger.debug("Host: %s, notification %s sending is blocked by globals",
self.get_name(), n_type)
return True
# Does the notification period allow sending out this notification?
if notification_period is not None and not notification_period.is_time_valid(t_wished):
logger.debug("Host: %s, notification %s sending is blocked by globals",
self.get_name(), n_type)
return True
if n_type in (u'PROBLEM', u'RECOVERY') and (
self.state == u'DOWN' and 'd' not in self.notification_options or
self.state == u'UP' and 'r' not in self.notification_options or
self.state == u'UNREACHABLE' and 'x' not in self.notification_options):
logger.debug("Host: %s, notification %s sending is blocked by options",
self.get_name(), n_type)
return True
if (n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED') and
'f' not in self.notification_options):
logger.debug("Host: %s, notification %s sending is blocked by options",
n_type, self.get_name())
return True
if (n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED') and
's' not in self.notification_options):
logger.debug("Host: %s, notification %s sending is blocked by options",
n_type, self.get_name())
return True
# Flapping notifications are blocked when in scheduled downtime
if (n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED') and
self.scheduled_downtime_depth > 0):
logger.debug("Host: %s, notification %s sending is blocked by downtime",
self.get_name(), n_type)
return True
# Acknowledgements make no sense when the status is ok/up
if n_type == u'ACKNOWLEDGEMENT' and self.state == self.ok_up:
logger.debug("Host: %s, notification %s sending is blocked by current state",
self.get_name(), n_type)
return True
# When in deep downtime, only allow end-of-downtime notifications
# In depth 1 the downtime just started and can be notified
if self.scheduled_downtime_depth > 1 and n_type not in (u'DOWNTIMEEND',
u'DOWNTIMECANCELLED'):
logger.debug("Host: %s, notification %s sending is blocked by deep downtime",
self.get_name(), n_type)
return True
# Block if in a scheduled downtime and a problem arises
if self.scheduled_downtime_depth > 0 and \
n_type in (u'PROBLEM', u'RECOVERY', u'ACKNOWLEDGEMENT'):
logger.debug("Host: %s, notification %s sending is blocked by downtime",
self.get_name(), n_type)
return True
# Block if the status is SOFT
if self.state_type == u'SOFT' and n_type == u'PROBLEM':
logger.debug("Host: %s, notification %s sending is blocked by soft state",
self.get_name(), n_type)
return True
# Block if the problem has already been acknowledged
if self.problem_has_been_acknowledged and n_type not in (u'ACKNOWLEDGEMENT',
u'DOWNTIMESTART',
u'DOWNTIMEEND',
u'DOWNTIMECANCELLED'):
logger.debug("Host: %s, notification %s sending is blocked by acknowledged",
self.get_name(), n_type)
return True
# Block if flapping
if self.is_flapping and n_type not in (u'FLAPPINGSTART',
u'FLAPPINGSTOP',
u'FLAPPINGDISABLED'):
logger.debug("Host: %s, notification %s sending is blocked by flapping",
self.get_name(), n_type)
return True
# Block if business rule smart notifications is enabled and all its
# children have been acknowledged or are under downtime.
if self.got_business_rule is True \
and self.business_rule_smart_notifications is True \
and self.business_rule_notification_is_blocked(hosts, services) is True \
and n_type == u'PROBLEM':
logger.debug("Host: %s, notification %s sending is blocked by business rules",
self.get_name(), n_type)
return True
logger.debug("Host: %s, notification %s sending is not blocked", self.get_name(), n_type)
return False
def get_total_services(self):
"""Get the number of services for this host
:return: service list length
:rtype: str
"""
return str(len(self.services))
def _tot_services_by_state(self, services, state):
"""Get the number of service in the specified state
:param state: state to filter service
:type state:
:return: number of service with s.state_id == state
:rtype: int
"""
return str(sum(1 for s in self.services
if services[s].state_id == state))
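# Counting sketch (hedged; 'services' is the uuid-indexed collection): with
# three linked services in states OK(0), OK(0) and CRITICAL(2):
#   self._tot_services_by_state(services, 0)  -> '2'
#   self._tot_services_by_state(services, 2)  -> '1'
# Note: the count is returned as a string, ready for macro resolution.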
def get_total_services_ok(self, services):
"""Get number of services ok
:param services:
:type services:
:return: Number of services
:rtype: int
"""
return self._tot_services_by_state(services, 0)
def get_total_services_warning(self, services):
"""Get number of services warning
:param services:
:type services:
:return: Number of services
:rtype: int
"""
return self._tot_services_by_state(services, 1)
def get_total_services_critical(self, services):
"""Get number of services critical
:param services:
:type services:
:return: Number of services
:rtype: int
"""
return self._tot_services_by_state(services, 2)
def get_total_services_unknown(self, services):
"""Get number of services unknown
:param services:
:type services:
:return: Number of services
:rtype: int
"""
return self._tot_services_by_state(services, 3)
def get_total_services_unreachable(self, services):
"""Get number of services unreachable
:param services:
:type services:
:return: Number of services
:rtype: int
"""
return self._tot_services_by_state(services, 4)
def get_ack_author_name(self):
"""Get the author of the acknowledgement
:return: author
:rtype: str
"""
if self.acknowledgement is None:
return ''
return getattr(self.acknowledgement, "author", '')
def get_ack_comment(self):
"""Get the comment of the acknowledgement
:return: comment
:rtype: str
"""
if self.acknowledgement is None:
return ''
return getattr(self.acknowledgement, "comment", '')
def get_snapshot_command(self):
"""Wrapper to get the name of the snapshot_command attribute
:return: snapshot_command name
:rtype: str
"""
if not getattr(self, 'snapshot_command', None):
return ''
return self.snapshot_command.get_name()
def get_downtime(self):
"""Accessor to scheduled_downtime_depth attribute
:return: scheduled downtime depth
:rtype: str
"""
return str(self.scheduled_downtime_depth)
def get_short_status(self, hosts, services):
"""Get the short status of this host
:return: "U", "D", "X" or "n/a" based on host state_id or business_rule state
:rtype: str
"""
mapping = {
0: "U",
1: "D",
4: "X",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return mapping.get(self.state_id, "n/a")
def get_status(self, hosts, services):
"""Get the status of this host
:return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
:rtype: str
"""
if self.got_business_rule:
mapping = {
0: "UP",
1: "DOWN",
4: "UNREACHABLE",
}
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return self.state
def get_overall_state(self, services):
"""Get the host overall state including the host self status
and the status of its services
Compute the host overall state identifier, including:
- the acknowledged state
- the downtime state
The host overall state is (prioritized):
- an host not monitored (5)
- an host down (4)
- an host unreachable (3)
- an host downtimed (2)
- an host acknowledged (1)
- an host up (0)
If the host overall state is <= 2, then the host overall state is the maximum value
of the host overall state and all the host services overall states.
The overall state of an host is:
- 0 if the host is UP and all its services are OK
- 1 if the host is DOWN or UNREACHABLE and acknowledged or
at least one of its services is acknowledged and
no other services are WARNING or CRITICAL
- 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or
at least one of its services is in a scheduled downtime and no
other services are WARNING or CRITICAL
- 3 if the host is UNREACHABLE or
at least one of its services is WARNING
- 4 if the host is DOWN or
at least one of its services is CRITICAL
- 5 if the host is not monitored
:param services: a list of known services
:type services: alignak.objects.service.Services
:return: the host overall state
:rtype: int
"""
overall_state = 0
# pylint: disable=too-many-nested-blocks
if not self.monitored:
overall_state = 5
elif self.acknowledged:
overall_state = 1
elif self.downtimed:
overall_state = 2
elif self.state_type == 'HARD':
if self.state == 'UNREACHABLE':
overall_state = 3
elif self.state == 'DOWN':
overall_state = 4
# Only consider the hosts services state if all is ok (or almost...)
if overall_state <= 2:
for service in self.services:
if service in services:
service = services[service]
# Only for monitored services
if service.overall_state_id < 5:
overall_state = max(overall_state, service.overall_state_id)
return overall_state
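# Priority sketch (illustrative): a monitored, acknowledged host starts at
# overall_state 1; if one of its services has overall_state_id 4, the
# result is max(1, 4) = 4. A host already at 3 (UNREACHABLE) or 4 (DOWN)
# ignores its services, since only overall_state <= 2 considers them.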
|
class Host(SchedulingItem):
'''Host class implements monitoring concepts for host.
For example it defines parents, check_interval, check_command etc.
'''
def __init__(self, params, parsing=True):
pass
def __str__(self):
pass
@staticmethod
def convert_conf_for_unreachable(params):
'''
The 'u' state for UNREACHABLE has been rewritten as 'x' in:
* flap_detection_options
* notification_options
* snapshot_criteria
So convert the values from the config file to keep compatibility with Nagios
:param params: parameters of the host before being put in properties
:type params: dict
:return: None
'''
pass
def fill_predictive_missing_parameters(self):
'''Fill address with host_name if not already set
and define state with initial_state
:return: None
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
def get_services(self):
'''Get all services for this host
:return: list of services
:rtype: list
'''
pass
def get_groupname(self, hostgroups):
'''Get name of the first host's hostgroup (alphabetic sort)
:return: host group name
:rtype: str
TODO: Clean this. It returns the first hostgroup (alphabetic sort)
'''
pass
def get_groupalias(self, hostgroups):
'''Get alias of the first host's hostgroup (alphabetic sort on group alias)
:return: host group alias
:rtype: str
TODO: Clean this. It returns the first hostgroup alias (alphabetic sort)
'''
pass
def get_groupnames(self, hostgroups):
'''Get names of the host's hostgroups
:return: comma separated names of hostgroups alphabetically sorted
:rtype: str
'''
pass
def get_groupaliases(self, hostgroups):
'''Get aliases of the host's hostgroups
:return: comma separated aliases of hostgroups alphabetically sorted
:rtype: str
'''
pass
def get_hostgroups(self):
'''Accessor to hostgroups attribute
:return: hostgroup list object of host
:rtype: list
'''
pass
def add_service_link(self, service):
'''Add a service to the service list of this host
:param service: the service to add
:type service: alignak.objects.service.Service
:return: None
'''
pass
def is_excluded_for(self, service):
'''Check whether this host should have the passed service be "excluded" or "not included".
A host can define service_includes and/or service_excludes directives to
either white-list-only or black-list some services from itself.
:param service:
:type service: alignak.objects.service.Service
:return: True if is excluded, otherwise False
:rtype: bool
'''
pass
def is_excluded_for_sdesc(self, sdesc, is_tpl=False):
''' Check whether this host should have the passed service *description*
be "excluded" or "not included".
:param sdesc: service description
:type sdesc:
:param is_tpl: True if service is template, otherwise False
:type is_tpl: bool
:return: True if service description excluded, otherwise False
:rtype: bool
'''
pass
def set_state_from_exit_status(self, status, notif_period, hosts, services):
'''Set the state in UP, DOWN, or UNREACHABLE according to the status of a check result.
:param status: integer among 0, 2, 3 and 4 (1 is handled by the caller)
:type status: int
:return: None
'''
pass
def is_state(self, status):
'''Return whether the given status matches the current host status
:param status: status to compare ( "o", "d", "x"). Usually comes from config files
:type status: str
:return: True if status <=> self.status, otherwise False
:rtype: bool
'''
pass
def last_time_non_ok_or_up(self):
'''Get the last time the host was in a non-OK state
:return: self.last_time_down if self.last_time_down > self.last_time_up, 0 otherwise
:rtype: int
'''
pass
def raise_check_result(self):
'''Raise ACTIVE CHECK RESULT entry
Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."
:return: None
'''
pass
def raise_alert_log_entry(self):
'''Raise HOST ALERT entry
Format is : "HOST ALERT: *get_name()*;*state*;*state_type*;*attempt*;*output*"
Example : "HOST ALERT: server;DOWN;HARD;1;I don't know what to say..."
:return: None
'''
pass
def raise_initial_state(self):
'''Raise CURRENT HOST ALERT entry (info level)
Format is : "CURRENT HOST STATE: *get_name()*;*state*;*state_type*;*attempt*;*output*"
Example : "CURRENT HOST STATE: server;DOWN;HARD;1;I don't know what to say..."
:return: None
'''
pass
def raise_notification_log_entry(self, notif, contact, host_ref=None):
'''Raise HOST NOTIFICATION entry (critical level)
Format is : "HOST NOTIFICATION: *contact.get_name()*;*self.get_name()*;*state*;
*command.get_name()*;*output*"
Example : "HOST NOTIFICATION: superadmin;server;UP;notify-by-rss;no output"
:param notif: notification object created by host alert
:type notif: alignak.objects.notification.Notification
:return: None
'''
pass
def raise_event_handler_log_entry(self, command):
'''Raise HOST EVENT HANDLER entry (critical level)
Format is : "HOST EVENT HANDLER: *self.get_name()*;*state*;*state_type*;*attempt*;
*command.get_name()*"
Example : "HOST EVENT HANDLER: server;UP;HARD;1;notify-by-rss"
:param command: Handler launched
:type command: alignak.objects.command.Command
:return: None
'''
pass
def raise_snapshot_log_entry(self, command):
'''Raise HOST SNAPSHOT entry (critical level)
Format is : "HOST SNAPSHOT: *self.get_name()*;*state*;*state_type*;*attempt*;
*command.get_name()*"
Example : "HOST SNAPSHOT: server;UP;HARD;1;notify-by-rss"
:param command: Snapshot command launched
:type command: alignak.objects.command.Command
:return: None
'''
pass
def raise_flapping_start_log_entry(self, change_ratio, threshold):
'''Raise HOST FLAPPING ALERT START entry (critical level)
Format is : "HOST FLAPPING ALERT: *self.get_name()*;STARTED;
Host appears to have started
flapping (*change_ratio*% change >= *threshold*% threshold)"
Example : "HOST FLAPPING ALERT: server;STARTED;
Host appears to have started
flapping (50.6% change >= 50.0% threshold)"
:param change_ratio: percent of changing state
:type change_ratio: float
:param threshold: threshold (percent) to trigger this log entry
:type threshold: float
:return: None
'''
pass
def raise_flapping_stop_log_entry(self, change_ratio, threshold):
'''Raise HOST FLAPPING ALERT STOPPED entry (critical level)
Format is : "HOST FLAPPING ALERT: *self.get_name()*;STOPPED;
Host appears to have stopped
flapping (*change_ratio*% change < *threshold*% threshold)"
Example : "HOST FLAPPING ALERT: server;STOPPED;
Host appears to have stopped
flapping (23.0% change < 25.0% threshold)"
:param change_ratio: percent of changing state
:type change_ratio: float
:param threshold: threshold (percent) to trigger this log entry
:type threshold: float
:return: None
'''
pass
def raise_no_next_check_log_entry(self):
'''Raise no scheduled check entry (warning level)
Format is : "I cannot schedule the check for the host 'get_name()*'
because there is not future valid time"
Example : "I cannot schedule the check for the host 'Server'
because there is not future valid time"
:return: None
'''
pass
def raise_acknowledge_log_entry(self):
'''Raise HOST ACKNOWLEDGE ALERT entry (critical level)
:return: None
'''
pass
def raise_unacknowledge_log_entry(self):
'''Raise HOST ACKNOWLEDGE STOPPED entry (critical level)
:return: None
'''
pass
def raise_enter_downtime_log_entry(self):
'''Raise HOST DOWNTIME ALERT entry (critical level)
Format is : "HOST DOWNTIME ALERT: *get_name()*;STARTED;
Host has entered a period of scheduled downtime"
Example : "HOST DOWNTIME ALERT: test_host_0;STARTED;
Host has entered a period of scheduled downtime"
:return: None
'''
pass
def raise_exit_downtime_log_entry(self):
'''Raise HOST DOWNTIME ALERT entry (critical level)
Format is : "HOST DOWNTIME ALERT: *get_name()*;STOPPED;
Host has entered a period of scheduled downtime"
Example : "HOST DOWNTIME ALERT: test_host_0;STOPPED;
Host has entered a period of scheduled downtime"
:return: None
'''
pass
def raise_cancel_downtime_log_entry(self):
'''Raise HOST DOWNTIME ALERT entry (critical level)
Format is : "HOST DOWNTIME ALERT: *get_name()*;CANCELLED;
Host has entered a period of scheduled downtime"
Example : "HOST DOWNTIME ALERT: test_host_0;CANCELLED;
Host has entered a period of scheduled downtime"
:return: None
'''
pass
def manage_stalking(self, check):
'''Check if the host needs stalking or not (immediate recheck)
If one of the stalking_options matches the exit_status ('o' <=> 0 ...)
then stalking is needed
Raise a log entry (info level) if stalking is needed
:param check: finished check (check.status == 'waitconsume')
:type check: alignak.check.Check
:return: None
'''
pass
def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact):
'''Check if the notification is blocked by this contact.
:param notif: notification created earlier
:type notif: alignak.notification.Notification
:param contact: contact we want to notify
:type notif: alignak.objects.contact.Contact
:return: True if the notification is blocked, False otherwise
:rtype: bool
'''
pass
def get_duration_sec(self):
'''Get duration in seconds. (cast it before returning)
:return: duration in seconds
:rtype: str
TODO: Move to util or SchedulingItem class
'''
pass
def get_duration(self):
'''Get duration formatted
Format is : "HHh MMm SSs"
Example : "10h 20m 40s"
:return: Formatted duration
:rtype: str
'''
pass
def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished):
'''Check if a notification is blocked by the host.
Conditions are ONE of the following::
* enable_notification is False (global)
* not in a notification_period
* notifications_enabled is False (local)
* notification_options is 'n' or matches the state ('DOWN' <=> 'd' ...)
(include flapping and downtimes)
* state goes ok and type is 'ACKNOWLEDGEMENT' (no sense)
* scheduled_downtime_depth > 0 and flapping (host is in downtime)
* scheduled_downtime_depth > 1 and not downtime end (deep downtime)
* scheduled_downtime_depth > 0 and problem or recovery (host is in downtime)
* SOFT state of a problem (we raise notification only on HARD state)
* ACK notification when already ACK (don't raise again ACK)
* not flapping notification in a flapping state
* business rule smart notifications is enabled and all its children have been acknowledged
or are under downtime
:param n_type: notification type
:type n_type:
:param t_wished: the time we should like to notify the host (mostly now)
:type t_wished: float
:return: True if ONE of the above condition was met, otherwise False
:rtype: bool
TODO: Refactor this, a lot of code duplication with Service.is_blocking_notifications
'''
pass
def get_total_services(self):
'''Get the number of services for this host
:return: service list length
:rtype: str
'''
pass
def _tot_services_by_state(self, services, state):
'''Get the number of services in the specified state
:param state: state to filter services on
:type state:
:return: number of services with s.state_id == state
:rtype: int
'''
pass
def get_total_services_ok(self, services):
'''Get number of services ok
:param services:
:type services:
:return: Number of services
:rtype: int
'''
pass
def get_total_services_warning(self, services):
'''Get number of services warning
:param services:
:type services:
:return: Number of services
:rtype: int
'''
pass
def get_total_services_critical(self, services):
'''Get number of services critical
:param services:
:type services:
:return: Number of services
:rtype: int
'''
pass
def get_total_services_unknown(self, services):
'''Get number of services unknown
:param services:
:type services:
:return: Number of services
:rtype: int
'''
pass
def get_total_services_unreachable(self, services):
'''Get number of services unreachable
:param services:
:type services:
:return: Number of services
:rtype: int
'''
pass
def get_ack_author_name(self):
'''Get the author of the acknowledgement
:return: author
:rtype: str
'''
pass
def get_ack_comment(self):
'''Get the comment of the acknowledgement
:return: comment
:rtype: str
'''
pass
def get_snapshot_command(self):
'''Wrapper to get the name of the snapshot_command attribute
:return: snapshot_command name
:rtype: str
'''
pass
def get_downtime(self):
'''Accessor to scheduled_downtime_depth attribute
:return: scheduled downtime depth
:rtype: str
'''
pass
def get_short_status(self, hosts, services):
'''Get the short status of this host
:return: "U", "D", "X" or "n/a" based on host state_id or business_rule state
:rtype: str
'''
pass
def get_status(self, hosts, services):
'''Get the status of this host
:return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
:rtype: str
'''
pass
def get_overall_state(self, services):
'''Get the host overall state including the host self status
and the status of its services
Compute the host overall state identifier, including:
- the acknowledged state
- the downtime state
The host overall state is (prioritized):
- a host not monitored (5)
- a host down (4)
- a host unreachable (3)
- a host downtimed (2)
- a host acknowledged (1)
- a host up (0)
If the host overall state is <= 2, then the host overall state is the maximum value
of the host overall state and all the host services overall states.
The overall state of a host is:
- 0 if the host is UP and all its services are OK
- 1 if the host is DOWN or UNREACHABLE and acknowledged or
at least one of its services is acknowledged and
no other services are WARNING or CRITICAL
- 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or
at least one of its services is in a scheduled downtime and no
other services are WARNING or CRITICAL
- 3 if the host is UNREACHABLE or
at least one of its services is WARNING
- 4 if the host is DOWN or
at least one of its services is CRITICAL
- 5 if the host is not monitored
:param services: a list of known services
:type services: alignak.objects.service.Services
:return: the host overall state
:rtype: int
'''
pass
| 52 | 49 | 19 | 2 | 9 | 7 | 3 | 0.71 | 1 | 3 | 0 | 0 | 49 | 10 | 50 | 163 | 1,181 | 165 | 597 | 118 | 545 | 423 | 350 | 116 | 299 | 15 | 4 | 4 | 144 |
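A minimal, self-contained sketch of the priority scheme that the get_overall_state docstring above describes; the function name, the state strings and the keyword arguments are illustrative assumptions, not the Alignak API:

def host_overall_state(state, monitored=True, acknowledged=False, downtimed=False,
                       services_states=()):
    """Illustrative only: state is 'UP', 'DOWN' or 'UNREACHABLE';
    services_states holds the 0..5 overall states of the host services."""
    if not monitored:
        overall = 5                    # not monitored
    elif acknowledged and state in ('DOWN', 'UNREACHABLE'):
        overall = 1                    # problem, but acknowledged
    elif downtimed and state in ('DOWN', 'UNREACHABLE'):
        overall = 2                    # problem, but in scheduled downtime
    elif state == 'UNREACHABLE':
        overall = 3
    elif state == 'DOWN':
        overall = 4
    else:
        overall = 0                    # UP
    # Below 3, the services can still degrade the host overall state
    if overall <= 2:
        overall = max([overall] + list(services_states))
    return overall

assert host_overall_state('UP') == 0
assert host_overall_state('DOWN', acknowledged=True, services_states=[0, 1]) == 1
assert host_overall_state('UP', services_states=[4]) == 4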
4,111 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/host.py
|
alignak.objects.host.Hosts
|
class Hosts(SchedulingItems):
"""Class for the hosts lists. It's mainly for configuration
"""
name_property = "host_name"
inner_class = Host
# pylint: disable=too-many-arguments
def linkify(self, timeperiods=None, commands=None, contacts=None,
realms=None, resultmodulations=None, businessimpactmodulations=None,
escalations=None, hostgroups=None,
checkmodulations=None, macromodulations=None):
"""Create link between objects::
* hosts -> timeperiods
* hosts -> hosts (parents, etc)
* hosts -> commands (check_command)
* hosts -> contacts
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link
:type commands: alignak.objects.command.Commands
:param contacts: contacts to link
:type contacts: alignak.objects.contact.Contacts
:param realms: realms to link
:type realms: alignak.objects.realm.Realms
:param resultmodulations: resultmodulations to link
:type resultmodulations: alignak.objects.resultmodulation.Resultmodulations
:param businessimpactmodulations: businessimpactmodulations to link
:type businessimpactmodulations:
alignak.objects.businessimpactmodulation.Businessimpactmodulations
:param escalations: escalations to link
:type escalations: alignak.objects.escalation.Escalations
:param hostgroups: hostgroups to link
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param checkmodulations: checkmodulations to link
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param macromodulations: macromodulations to link
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:return: None
"""
self.linkify_with_timeperiods(timeperiods, 'notification_period')
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
self.linkify_h_by_h()
self.linkify_h_by_hg(hostgroups)
self.linkify_with_commands(commands, 'check_command')
self.linkify_with_commands(commands, 'event_handler')
self.linkify_with_commands(commands, 'snapshot_command')
self.linkify_with_contacts(contacts)
# No more necessary
self.linkify_h_by_realms(realms)
self.linkify_with_result_modulations(resultmodulations)
self.linkify_with_business_impact_modulations(businessimpactmodulations)
# WARNING: not all escalations are linked here
# (only the plain escalations, not the serviceescalations or hostescalations).
# Those are linked in the escalations linkify.
self.linkify_with_escalations(escalations)
self.linkify_with_check_modulations(checkmodulations)
self.linkify_with_macro_modulations(macromodulations)
def fill_predictive_missing_parameters(self):
"""Loop on hosts and call Host.fill_predictive_missing_parameters()
:return: None
"""
for host in self:
host.fill_predictive_missing_parameters()
def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid)
else:
self.add_error("the parent '%s' for the host '%s' is unknown"
% (parent, host.get_name()))
# We find the id, we replace the names
host.parents = new_parents
def linkify_h_by_realms(self, realms):
"""Link hosts with realms
:param realms: realms object to link with
:type realms: alignak.objects.realm.Realms
:return: None
"""
default_realm = realms.get_default()
for host in self:
if not getattr(host, 'realm', None):
# Applying default realm to an host
host.realm = default_realm.uuid if default_realm else ''
host.realm_name = default_realm.get_name() if default_realm else ''
host.got_default_realm = True
if host.realm not in realms:
realm = realms.find_by_name(host.realm)
if not realm:
continue
host.realm = realm.uuid
else:
realm = realms[host.realm]
def linkify_h_by_hg(self, hostgroups):
"""Link hosts with hostgroups
:param hostgroups: hostgroups object to link with
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
"""
# Register host in the hostgroups
for host in self:
new_hostgroups = []
if hasattr(host, 'hostgroups') and host.hostgroups != []:
hgs = [n.strip() for n in host.hostgroups if n.strip()]
for hg_name in hgs:
# TODO: should an unknown hostgroup raise an error ?
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is not None:
new_hostgroups.append(hostgroup.uuid)
else:
err = ("the hostgroup '%s' of the host '%s' is "
"unknown" % (hg_name, host.host_name))
host.add_error(err)
host.hostgroups = new_hostgroups
def explode(self, hostgroups, contactgroups):
"""Explode hosts with hostgroups, contactgroups::
* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members
:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
for template in list(self.templates.values()):
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(template, contactgroups)
# Register host in the hostgroups
for host in self:
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(host, contactgroups)
if hasattr(host, 'host_name') and hasattr(host, 'hostgroups'):
hname = host.host_name
for hostgroup in host.hostgroups:
hostgroups.add_member(hname, hostgroup.strip())
def apply_dependencies(self):
"""Loop on hosts and register dependency between parent and son
:return: None
"""
for host in self:
for parent_id in getattr(host, 'parents', []):
if not parent_id:
continue
parent = self[parent_id]
if parent.active_checks_enabled:
# Add parent in the list
host.act_depend_of.append(
(parent_id, ['d', 'x', 's', 'f'], '', True)
)
# Add child in the parent
parent.act_depend_of_me.append(
(host.uuid, ['d', 'x', 's', 'f'], '', True)
)
# And add the parent/child dep filling too, for broking
if host.uuid not in parent.child_dependencies:
parent.child_dependencies.append(host.uuid)
if parent_id not in host.parent_dependencies:
host.parent_dependencies.append(parent_id)
def find_hosts_that_use_template(self, tpl_name):
"""Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter on
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str]
"""
return [h.host_name for h in self if tpl_name in h.tags if hasattr(h, "host_name")]
def is_correct(self):
"""Check if the hosts list configuration is correct ::
* check if any loop exists in each host dependencies
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
loop = self.no_loop_in_parents("self", "parents")
if loop:
self.add_error("Loop detected while checking hosts")
state = False
for uuid, item in list(self.items.items()):
for elem in loop:
if elem == uuid:
self.add_error("Host %s is parent in dependency defined in %s"
% (item.get_name(), item.imported_from))
elif elem in item.parents:
self.add_error("Host %s is child in dependency defined in %s"
% (self[elem].get_name(), self[elem].imported_from))
return super(Hosts, self).is_correct() and state
|
class Hosts(SchedulingItems):
'''Class for the hosts lists. It's mainly for configuration
'''
def linkify(self, timeperiods=None, commands=None, contacts=None,
realms=None, resultmodulations=None, businessimpactmodulations=None,
escalations=None, hostgroups=None,
checkmodulations=None, macromodulations=None):
'''Create link between objects::
* hosts -> timeperiods
* hosts -> hosts (parents, etc)
* hosts -> commands (check_command)
* hosts -> contacts
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link
:type commands: alignak.objects.command.Commands
:param contacts: contacts to link
:type contacts: alignak.objects.contact.Contacts
:param realms: realms to link
:type realms: alignak.objects.realm.Realms
:param resultmodulations: resultmodulations to link
:type resultmodulations: alignak.objects.resultmodulation.Resultmodulations
:param businessimpactmodulations: businessimpactmodulations to link
:type businessimpactmodulations:
alignak.objects.businessimpactmodulation.Businessimpactmodulations
:param escalations: escalations to link
:type escalations: alignak.objects.escalation.Escalations
:param hostgroups: hostgroups to link
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param checkmodulations: checkmodulations to link
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param macromodulations: macromodulations to link
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:return: None
'''
pass
def fill_predictive_missing_parameters(self):
'''Loop on hosts and call Host.fill_predictive_missing_parameters()
:return: None
'''
pass
def linkify_h_by_h(self):
'''Link hosts with their parents
:return: None
'''
pass
def linkify_h_by_realms(self, realms):
'''Link hosts with realms
:param realms: realms object to link with
:type realms: alignak.objects.realm.Realms
:return: None
'''
pass
def linkify_h_by_hg(self, hostgroups):
'''Link hosts with hostgroups
:param hostgroups: hostgroups object to link with
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
'''
pass
def explode(self, hostgroups, contactgroups):
'''Explode hosts with hostgroups, contactgroups::
* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members
:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
'''
pass
def apply_dependencies(self):
'''Loop on hosts and register dependency between parent and son
:return: None
'''
pass
def find_hosts_that_use_template(self, tpl_name):
'''Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter on
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str]
'''
pass
def is_correct(self):
'''Check if the hosts list configuration is correct ::
* check if any loop exists in each host dependencies
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 10 | 10 | 23 | 2 | 12 | 10 | 4 | 0.82 | 1 | 2 | 0 | 0 | 9 | 0 | 9 | 61 | 227 | 29 | 109 | 40 | 96 | 89 | 94 | 37 | 84 | 7 | 4 | 4 | 38 |
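Hosts.is_correct() above relies on a no_loop_in_parents helper that is not part of this record; a minimal sketch of an equivalent cycle check over the parent relation (the function name and the plain-dict input are assumptions) could look like this:

def find_parent_loops(parents_by_host):
    """Return the set of host ids involved in a parent/child cycle.
    parents_by_host maps a host id to the list of its parent ids."""
    in_loop, done = set(), set()

    def visit(node, stack):
        if node in done:
            return
        if node in stack:
            # Every node from the first occurrence onwards closes a cycle
            in_loop.update(stack[stack.index(node):])
            return
        for parent in parents_by_host.get(node, []):
            visit(parent, stack + [node])
        done.add(node)

    for host in parents_by_host:
        visit(host, [])
    return in_loop

# 'a' and 'b' depend on each other, 'c' only points into the loop
assert find_parent_loops({'a': ['b'], 'b': ['a'], 'c': ['a']}) == {'a', 'b'}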
4,112 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/hostdependency.py
|
alignak.objects.hostdependency.Hostdependencies
|
class Hostdependencies(Items):
"""Hostdependencies manage a list of Hostdependency objects, used for parsing configuration
"""
inner_class = Hostdependency
def delete_host_dep_by_id(self, ids):
"""Delete a list of hostdependency
:param ids: ids list to delete
:type ids: list
:return: None
"""
for h_id in ids:
del self[h_id]
def explode(self, hostgroups):
# pylint: disable=too-many-locals
"""Explode all host dependency for each member of hostgroups
Each member of a dependent hostgroup or a hostgroup in dependency has to get a copy of
host dependencies (quite complex to parse)
:param hostgroups: used to look for hostgroup
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
"""
# The "old" dependencies will be removed. All dependencies with
# more than one host or a host group will be in it
to_be_removed = []
# Then for every host create a copy of the dependency with just the host
# because we are adding services, we can't just loop in it
for host_dep_id in list(self.items.keys()):
host_dep = self.items[host_dep_id]
# We explode first the dependent hosts (sons) part
son_hosts = []
if getattr(host_dep, 'dependent_hostgroup_name', ''):
hg_names = [g.strip() for g in host_dep.dependent_hostgroup_name.split(',')]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
host_dep.add_error("A hostdependency got an unknown "
"dependent_hostgroup_name '%s'" % hg_name)
continue
son_hosts.extend([m.strip() for m in hostgroup.get_hosts()])
if getattr(host_dep, 'dependent_host_name', ''):
son_hosts.extend([h.strip() for h in host_dep.dependent_host_name.split(',')])
# Ok, and now the depending hosts (self and parents) part :)
father_hosts = []
if getattr(host_dep, 'hostgroup_name', ''):
hg_names = [g.strip() for g in host_dep.hostgroup_name.split(',')]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
host_dep.add_error("A hostdependency got an unknown "
"hostgroup_name '%s'" % hg_name)
continue
father_hosts.extend([m.strip() for m in hostgroup.get_hosts()])
if getattr(host_dep, 'host_name', ''):
father_hosts.extend([h.strip() for h in host_dep.host_name.split(',')])
# Loop over all sons and fathers to get S*F host deps
for dep_hname in son_hosts:
dep_hname = dep_hname.strip()
for host_name in father_hosts:
new_hd = host_dep.copy()
new_hd.dependent_host_name = dep_hname
new_hd.host_name = host_name
new_hd.definition_order = 1
self.add_item(new_hd)
to_be_removed.append(host_dep_id)
self.delete_host_dep_by_id(to_be_removed)
def linkify(self, hosts, timeperiods):
"""Create link between objects::
* hostdependency -> host
* hostdependency -> timeperiods
:param hosts: hosts to link
:type hosts: alignak.objects.host.Hosts
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
self.linkify_host_dep_by_host(hosts)
self.linkify_host_dep_by_timeperiod(timeperiods)
self.linkify_host_by_host_dep(hosts)
def linkify_host_dep_by_host(self, hosts):
"""Replace dependent_host_name and host_name
in host dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for host_dep in self:
host_name = getattr(host_dep, 'host_name', '')
if host_name:
host = hosts.find_by_name(host_name)
if host is None:
host_dep.add_error("got a bad host_name definition '%s'" % host_name)
if host:
host_dep.host_name = host.uuid
dep_host_name = getattr(host_dep, 'dependent_host_name', '')
if dep_host_name:
dep_host = hosts.find_by_name(dep_host_name)
if dep_host is None:
host_dep.add_error("got a bad dependent_host_name definition '%s'"
% dep_host_name)
if dep_host:
host_dep.dependent_host_name = dep_host.uuid
def linkify_host_dep_by_timeperiod(self, timeperiods):
"""Replace dependency_period by a real object in host dependency
:param timeperiods: list of timeperiod, used to look for a specific one
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
for host_dep in self:
try:
timeperiod_name = getattr(host_dep, 'dependency_period', '')
if timeperiod_name:
timeperiod = timeperiods.find_by_name(timeperiod_name)
if timeperiod is None:
host_dep.add_error("got a bad dependency_period definition '%s'"
% timeperiod_name)
if timeperiod:
host_dep.dependency_period = timeperiod.uuid
except AttributeError as exp:  # pragma: no cover, simple protection
logger.error("[hostdependency] fail to linkify by timeperiod: %s", exp)
def linkify_host_by_host_dep(self, hosts):
"""Add dependency in host objects
:param hosts: hosts list
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostdep in self:
# Only used for debugging purpose when loops are detected
setattr(hostdep, "host_name_string", "undefined")
setattr(hostdep, "dependent_host_name_string", "undefined")
# if the host dep conf is bad, pass this one
if getattr(hostdep, 'host_name', None) is None or\
getattr(hostdep, 'dependent_host_name', None) is None:
continue
if hostdep.host_name not in hosts or hostdep.dependent_host_name not in hosts:
continue
hosts.add_act_dependency(hostdep.dependent_host_name, hostdep.host_name,
hostdep.notification_failure_criteria,
getattr(hostdep, 'dependency_period', ''),
hostdep.inherits_parent)
hosts.add_chk_dependency(hostdep.dependent_host_name, hostdep.host_name,
hostdep.execution_failure_criteria,
getattr(hostdep, 'dependency_period', ''),
hostdep.inherits_parent)
# Only used for debugging purpose when loops are detected
setattr(hostdep, "host_name_string", hosts[hostdep.host_name].get_name())
setattr(hostdep, "dependent_host_name_string",
hosts[hostdep.dependent_host_name].get_name())
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
loop = self.no_loop_in_parents("host_name", "dependent_host_name")
if loop:
self.add_error("Loop detected while checking host dependencies:")
state = False
for item in self:
for elem in loop:
if elem == item.host_name:
self.add_error("- host %s is a parent host_name in dependency defined in %s"
% (item.host_name_string, item.imported_from))
elif elem == item.dependent_host_name:
self.add_error("- host %s is a child host_name in dependency defined in %s"
% (item.dependent_host_name_string, item.imported_from))
return super(Hostdependencies, self).is_correct() and state
|
class Hostdependencies(Items):
'''Hostdependencies manage a list of Hostdependency objects, used for parsing configuration
'''
def delete_host_dep_by_id(self, ids):
'''Delete a list of hostdependency
:param ids: ids list to delete
:type ids: list
:return: None
'''
pass
def explode(self, hostgroups):
'''Explode all host dependency for each member of hostgroups
Each member of a dependent hostgroup or a hostgroup in dependency has to get a copy of
host dependencies (quite complex to parse)
:param hostgroups: used to look for hostgroup
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
'''
pass
def linkify(self, hosts, timeperiods):
'''Create link between objects::
* hostdependency -> host
* hostdependency -> timeperiods
:param hosts: hosts to link
:type hosts: alignak.objects.host.Hosts
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
'''
pass
def linkify_host_dep_by_host(self, hosts):
'''Replace dependent_host_name and host_name
in host dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
def linkify_host_dep_by_timeperiod(self, timeperiods):
'''Replace dependency_period by a real object in host dependency
:param timeperiods: list of timeperiod, used to look for a specific one
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
'''
pass
def linkify_host_by_host_dep(self, hosts):
'''Add dependency in host objects
:param hosts: hosts list
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 8 | 8 | 27 | 4 | 16 | 8 | 6 | 0.52 | 1 | 3 | 0 | 0 | 7 | 0 | 7 | 52 | 202 | 33 | 112 | 35 | 104 | 58 | 97 | 34 | 89 | 12 | 2 | 4 | 39 |
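The heart of Hostdependencies.explode() above is a cross product between dependent hosts (sons) and depending hosts (fathers); a reduced illustration with plain dicts standing in for Hostdependency objects (the helper name is an assumption):

def explode_dependency(dep, group_members):
    """dep holds comma-separated names like the flat configuration does;
    group_members maps a hostgroup name to its member host names."""
    def names(key):
        return [n.strip() for n in dep.get(key, '').split(',') if n.strip()]

    sons = names('dependent_host_name')
    for group in names('dependent_hostgroup_name'):
        sons.extend(group_members.get(group, []))
    fathers = names('host_name')
    for group in names('hostgroup_name'):
        fathers.extend(group_members.get(group, []))
    # One new dependency per (son, father) couple, as in explode()
    return [dict(dep, dependent_host_name=son, host_name=father)
            for son in sons for father in fathers]

new_deps = explode_dependency({'dependent_hostgroup_name': 'web', 'host_name': 'router'},
                              {'web': ['www1', 'www2']})
assert len(new_deps) == 2 and {d['dependent_host_name'] for d in new_deps} == {'www1', 'www2'}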
4,113 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/hostdependency.py
|
alignak.objects.hostdependency.Hostdependency
|
class Hostdependency(Item):
"""Hostdependency class is a simple implementation of host dependency as
defined in a monitoring context (dependency period, notification_failure_criteria ..)
"""
my_type = 'hostdependency'
my_name_property = "host_relation"
my_index_property = "host_relation"
properties = Item.properties.copy()
properties.update({
'dependent_host_name':
StringProp(),
'dependent_hostgroup_name':
StringProp(default=''),
'host_name':
StringProp(),
'hostgroup_name':
StringProp(default=''),
'inherits_parent':
BoolProp(default=False),
'execution_failure_criteria':
ListProp(default=['n'], split_on_comma=True),
'notification_failure_criteria':
ListProp(default=['n'], split_on_comma=True),
'dependency_period':
StringProp(default='')
})
def __init__(self, params, parsing=True):
# Update default options
for prop in ['execution_failure_criteria', 'notification_failure_criteria']:
if prop in params:
params[prop] = [p.replace('u', 'x') for p in params[prop]]
super(Hostdependency, self).__init__(params, parsing=parsing)
# def __str__(self): # pragma: no cover
# return '<Hostdependency %s %s, uuid=%s, use: %s />' \
# % ('template' if self.is_a_template() else '', self.get_full_name(), self.uuid,
# getattr(self, 'use', None))
# __repr__ = __str__
@property
def host_relation(self):
"""Unique key for a host dependency
:return: Tuple with host_name and dependent_host_name
:rtype: tuple
"""
return "{}->{}".format(getattr(self, 'host_name', 'unknown'),
getattr(self, 'dependent_host_name', 'independant'))
def get_full_name(self):
"""Get name based on dependent_host_name and host_name attributes
Each attribute is replaced with 'unknown' if attribute is not set
:return: dependent_host_name/host_name
:rtype: str
"""
if self.is_a_template():
return self.get_name()
return "{}->{}".format(getattr(self, 'host_name', 'unknown'),
getattr(self, 'dependent_host_name', 'independant'))
|
class Hostdependency(Item):
'''Hostdependency class is a simple implementation of host dependency as
defined in a monitoring context (dependency period, notification_failure_criteria ..)
'''
def __init__(self, params, parsing=True):
pass
@property
def host_relation(self):
'''Unique key for a host dependency
:return: Tuple with host_name and dependent_host_name
:rtype: tuple
'''
pass
def get_full_name(self):
'''Get name based on dependent_host_name and host_name attributes
Each attribute is replaced with 'unknown' if attribute is not set
:return: dependent_host_name/host_name
:rtype: str
'''
pass
| 5 | 3 | 8 | 1 | 4 | 3 | 2 | 0.49 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 37 | 63 | 8 | 37 | 10 | 32 | 18 | 17 | 9 | 13 | 3 | 3 | 2 | 6 |
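The host_relation property above is what makes a dependency indexable by name, with 'unknown' and 'independant' as fallbacks; the same pattern in isolation (the stand-in class is an assumption):

class RelationKey:
    """Minimal stand-in reproducing the host_relation key format."""
    def __init__(self, host_name=None, dependent_host_name=None):
        if host_name is not None:
            self.host_name = host_name
        if dependent_host_name is not None:
            self.dependent_host_name = dependent_host_name

    @property
    def host_relation(self):
        return "{}->{}".format(getattr(self, 'host_name', 'unknown'),
                               getattr(self, 'dependent_host_name', 'independant'))

assert RelationKey('router', 'www1').host_relation == 'router->www1'
assert RelationKey('router').host_relation == 'router->independant'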
4,114 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/hostescalation.py
|
alignak.objects.hostescalation.Hostescalation
|
class Hostescalation(Item):
"""Hostescalation class is used to implement notification escalation for hosts
TODO: Why doesn't this class inherit from alignak.objects.Escalation?
Maybe we can merge them
"""
my_type = 'hostescalation'
properties = Item.properties.copy()
properties.update({
'host_name':
StringProp(),
'hostgroup_name':
StringProp(),
'first_notification':
IntegerProp(),
'last_notification':
IntegerProp(),
'notification_interval':
IntegerProp(default=30), # like Nagios value
'escalation_period':
StringProp(default=''),
'escalation_options':
ListProp(default=['d', 'x', 'r']),
'contacts':
ListProp(default=[], merging='join', split_on_comma=True),
'contact_groups':
ListProp(default=[], merging='join', split_on_comma=True),
'first_notification_time':
IntegerProp(),
'last_notification_time':
IntegerProp(),
})
def __init__(self, params, parsing=True):
# Update default options
for prop in ['escalation_options']:
if prop in params:
params[prop] = [p.replace('u', 'x') for p in params[prop]]
super(Hostescalation, self).__init__(params, parsing=parsing)
self.fill_default()
|
class Hostescalation(Item):
'''Hostescalation class is used to implement notification escalation for hosts
TODO: Why doesn't this class inherit from alignak.objects.Escalation?
Maybe we can merge them
'''
def __init__(self, params, parsing=True):
pass
| 2 | 1 | 7 | 0 | 6 | 1 | 3 | 0.18 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 35 | 41 | 3 | 33 | 5 | 31 | 6 | 10 | 5 | 8 | 3 | 3 | 2 | 3 |
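Hostescalation.__init__ above (like Hostdependency.__init__ earlier) remaps the legacy 'u' (unreachable) option letter to 'x' before calling the parent constructor; the transformation alone, as a hedged sketch (the function name is an assumption):

def remap_legacy_options(params, option_props=('escalation_options',)):
    """Replace the legacy 'u' option letter with 'x' in the listed list-properties."""
    for prop in option_props:
        if prop in params:
            params[prop] = [p.replace('u', 'x') for p in params[prop]]
    return params

assert remap_legacy_options({'escalation_options': ['d', 'u', 'r']}) == \
    {'escalation_options': ['d', 'x', 'r']}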
4,115 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/hostescalation.py
|
alignak.objects.hostescalation.Hostescalations
|
class Hostescalations(Items):
"""Hostescalations manage a list of Hostescalation objects, used for parsing configuration
"""
name_property = ""
inner_class = Hostescalation
def explode(self, escalations):
"""Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None
"""
# Now we explode all escalations (host_name, hostgroup_name) to escalations
for escalation in self:
properties = escalation.__class__.properties
name = getattr(escalation, 'host_name', getattr(escalation, 'hostgroup_name', ''))
creation_dict = {
'escalation_name':
'Generated-HE-%s-%s' % (name, escalation.uuid)
}
for prop in properties:
if hasattr(escalation, prop):
creation_dict[prop] = getattr(escalation, prop)
escalations.add_escalation(Escalation(creation_dict))
|
class Hostescalations(Items):
'''Hostescalations manage a list of Hostescalation objects, used for parsing configuration
'''
def explode(self, escalations):
'''Create instance of Escalation for each HostEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None
'''
pass
| 2 | 2 | 20 | 2 | 12 | 6 | 4 | 0.53 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 46 | 27 | 4 | 15 | 9 | 13 | 8 | 12 | 9 | 10 | 4 | 2 | 3 | 4 |
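Hostescalations.explode() above copies every schema-declared attribute the object actually carries into a creation dictionary; the same idiom reduced to plain Python (the class and helper names are assumptions):

def to_creation_dict(obj, schema):
    """Copy the schema-declared attributes that obj actually carries."""
    name = getattr(obj, 'host_name', getattr(obj, 'hostgroup_name', ''))
    out = {'escalation_name': 'Generated-HE-%s' % name}
    for prop in schema:
        if hasattr(obj, prop):
            out[prop] = getattr(obj, prop)
    return out

class _FakeEscalation:
    host_name = 'srv1'
    first_notification = 2

assert to_creation_dict(_FakeEscalation(), ['first_notification', 'last_notification']) == \
    {'escalation_name': 'Generated-HE-srv1', 'first_notification': 2}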
4,116 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/hostextinfo.py
|
alignak.objects.hostextinfo.HostsExtInfo
|
class HostsExtInfo(Items):
"""HostsExtInfo manage HostExtInfo and propagate properties (listed before)
into Hosts if necessary
"""
name_property = "host_name"
inner_class = HostExtInfo
def merge(self, hosts):
"""Merge extended host information into services
:param hosts: hosts list, to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for extinfo in self:
host_name = extinfo.get_name()
host = hosts.find_by_name(host_name)
if host is not None:
# Fusion
self.merge_extinfo(host, extinfo)
@staticmethod
def merge_extinfo(host, extinfo):
"""Merge extended host information into a host
:param host: the host to edit
:type host: alignak.objects.host.Host
:param extinfo: the external info we get data from
:type extinfo: alignak.objects.hostextinfo.HostExtInfo
:return: None
"""
# Note that 2d_coords and 3d_coords are never merged, so they are not usable!
properties = ['notes', 'notes_url', 'icon_image', 'icon_image_alt',
'vrml_image', 'statusmap_image']
# host properties have precedence over hostextinfo properties
for prop in properties:
if getattr(host, prop) == '' and getattr(extinfo, prop) != '':
setattr(host, prop, getattr(extinfo, prop))
|
class HostsExtInfo(Items):
'''HostsExtInfo manage HostExtInfo and propagate properties (listed before)
into Hosts if necessary
'''
def merge(self, hosts):
'''Merge extended host information into hosts
:param hosts: hosts list, to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
@staticmethod
def merge_extinfo(host, extinfo):
'''Merge extended host information into a host
:param host: the host to edit
:type host: alignak.objects.host.Host
:param extinfo: the external info we get data from
:type extinfo: alignak.objects.hostextinfo.HostExtInfo
:return: None
'''
pass
| 4 | 3 | 15 | 1 | 6 | 8 | 3 | 1.13 | 1 | 0 | 0 | 0 | 1 | 0 | 2 | 47 | 39 | 5 | 16 | 11 | 12 | 18 | 14 | 10 | 11 | 3 | 2 | 2 | 6 |
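merge_extinfo() above gives host properties precedence: an extinfo value is only copied when the host value is empty. The rule in isolation, over plain dicts (names are assumptions):

def merge_with_precedence(primary, fallback, keys):
    """Fill empty-string values of primary from fallback; primary always wins otherwise."""
    for key in keys:
        if primary.get(key, '') == '' and fallback.get(key, '') != '':
            primary[key] = fallback[key]
    return primary

host = {'notes': '', 'icon_image': 'host.png'}
extinfo = {'notes': 'from extinfo', 'icon_image': 'ext.png'}
assert merge_with_precedence(host, extinfo, ['notes', 'icon_image']) == \
    {'notes': 'from extinfo', 'icon_image': 'host.png'}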
4,117 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_daemon_start.py
|
tests.test_daemon_start.Test_Poller_Start
|
class Test_Poller_Start(TemplateDaemonStart, AlignakTest):
def setUp(self):
super(Test_Poller_Start, self).setUp()
daemon_cls = Poller
daemon_name = 'my_poller'
|
class Test_Poller_Start(TemplateDaemonStart, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 6 | 1 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
4,118 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/contact.py
|
alignak.objects.contact.Contacts
|
class Contacts(CommandCallItems):
"""Contacts manage a list of Contacts objects, used for parsing configuration
"""
inner_class = Contact
def linkify(self, commands, notificationways):
"""Create link between contacts and notification ways, and commands
:param commands: commands to link with
:type commands: alignak.objects.command.Commands
:param notificationways: notificationways to link with
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
"""
self.linkify_with_notificationways(notificationways)
self.linkify_with_commands(commands, 'service_notification_commands', is_a_list=True)
self.linkify_with_commands(commands, 'host_notification_commands', is_a_list=True)
def linkify_with_notificationways(self, notificationways):
"""Link contacts with notification ways
:param notificationways: notificationways to link with
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
"""
for i in self:
if not hasattr(i, 'notificationways'):
continue
new_notificationways = []
for nw_name in strip_and_uniq(i.notificationways):
notifway = notificationways.find_by_name(nw_name)
if notifway is not None:
new_notificationways.append(notifway.uuid)
else:
i.add_error("the notificationways named '%s' is unknown" % nw_name)
# Get the list, but first make elements unique
i.notificationways = list(set(new_notificationways))
# Update the contact host/service notification commands properties
i.get_notification_commands(notificationways, 'host', command_name=True)
i.get_notification_commands(notificationways, 'service', command_name=True)
def explode(self, contactgroups, notificationways):
"""Explode all contact for each contactsgroup
:param contactgroups: contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:param notificationways: notificationways to explode
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
"""
# The contactgroups property needs to be filled to get the information
self.apply_partial_inheritance('contactgroups')
# _special properties may come from a template, so
# import them before processing ourselves
for prop in Contact.special_properties:
if prop == 'contact_name':
continue
self.apply_partial_inheritance(prop)
# Register ourselves into the contacts groups we are in
for contact in self:
if not (hasattr(contact, 'contact_name') and hasattr(contact, 'contactgroups')):
continue
for contactgroup in contact.contactgroups:
contactgroups.add_member(contact.contact_name, contactgroup.strip())
# Now create a notification way with the simple parameter of the contacts
for contact in self:
# Fill default values for all the properties
contact.fill_default()
# If some NW are still existing, do not create one more...
# if hasattr(contact, 'notificationways') and getattr(contact, 'notificationways'):
# # The contact still has some defined NWs
# continue
#
add_nw = False
params = {
'notificationway_name': "%s_inner_nw" % contact.get_name()
}
for prop, entry in list(NotificationWay.properties.items()):
if prop not in ['service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'service_notification_commands', 'host_notification_commands',
'min_business_impact']:
continue
if getattr(contact, prop, None) is not None:
params[prop] = getattr(contact, prop)
if entry.has_default and getattr(contact, prop) != entry.default:
# Add a NW if no default values
logger.debug("Contact %s, add a notification way because: %s = %s",
contact.get_name(), prop, getattr(contact, prop))
add_nw = True
if not add_nw:
continue
notificationways.add_item(NotificationWay(params, parsing=True))
if not hasattr(contact, 'notificationways'):
contact.notificationways = []
contact.notificationways = list(contact.notificationways)
contact.notificationways.append(params['notificationway_name'])
|
class Contacts(CommandCallItems):
'''Contacts manage a list of Contacts objects, used for parsing configuration
'''
def linkify(self, commands, notificationways):
'''Create link between contacts and notification ways, and commands
:param commands: commands to link with
:type commands: alignak.objects.command.Commands
:param notificationways: notificationways to link with
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
'''
pass
def linkify_with_notificationways(self, notificationways):
'''Link contacts with notification ways
:param notificationways: notificationways to link with
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
'''
pass
def explode(self, contactgroups, notificationways):
'''Explode all contacts for each contactgroup
:param contactgroups: contactgroups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:param notificationways: notificationways to explode
:type notificationways: alignak.objects.notificationway.Notificationways
:return: None
'''
pass
| 4 | 4 | 34 | 5 | 18 | 11 | 6 | 0.63 | 1 | 4 | 2 | 0 | 3 | 0 | 3 | 50 | 110 | 19 | 56 | 15 | 52 | 35 | 49 | 15 | 45 | 13 | 3 | 4 | 19 |
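The last part of Contacts.explode() above only creates a contact's inner notification way when at least one notification property was set to a non-default value; the decision, stripped of the Alignak property machinery (names are assumptions):

def needs_inner_notification_way(contact_values, defaults):
    """True when any notification property is explicitly set to a non-default value."""
    for prop, default in defaults.items():
        value = contact_values.get(prop)
        if value is not None and value != default:
            return True
    return False

defaults = {'host_notification_options': ['d', 'x', 'r'], 'min_business_impact': 0}
assert not needs_inner_notification_way({'min_business_impact': 0}, defaults)
assert needs_inner_notification_way({'min_business_impact': 3}, defaults)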
4,119 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/hostgroup.py
|
alignak.objects.hostgroup.Hostgroups
|
class Hostgroups(Itemgroups):
"""
Class to manage list of Hostgroup
Hostgroups is used to regroup all Hostgroup
"""
inner_class = Hostgroup
def add_member(self, host_name, hostgroup_name):
"""Add a host string to a hostgroup member
if the host group does not exist, create it
:param host_name: host name
:type host_name: str
:param hostgroup_name:hostgroup name
:type hostgroup_name: str
:return: None
"""
group = self.find_by_name(hostgroup_name)
if group:
group.add_members(host_name)
return
group = Hostgroup({
'hostgroup_name': hostgroup_name, 'members': host_name})
self.add(group)
def get_members_of_group(self, gname):
"""Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of the hosts in the group
:rtype: list[alignak.objects.host.Host]
"""
hostgroup = self.find_by_name(gname)
if hostgroup:
return hostgroup.get_hosts()
return []
def linkify(self, hosts=None, realms=None, forced_realms_hostgroups=True):
"""Link hostgroups with hosts and realms
:param hosts: all Hosts
:type hosts: alignak.objects.host.Hosts
:param realms: all Realms
:type realms: alignak.objects.realm.Realms
:return: None
"""
self.linkify_hostgroups_hosts(hosts)
self.linkify_hostgroups_realms_hosts(realms, hosts, forced_realms_hostgroups)
def linkify_hostgroups_hosts(self, hosts):
"""We just search for each hostgroup the id of the hosts
and replace the names by the found identifiers
:param hosts: object Hosts
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostgroup in self:
members = hostgroup.get_hosts()
# The new members identifiers list
new_members = []
for member in members:
# member is an host name
member = member.strip()
if not member: # void entry, skip this
continue
if member == '*':
# All the hosts identifiers list
new_members.extend(list(hosts.items.keys()))
else:
host = hosts.find_by_name(member)
if host is not None:
new_members.append(host.uuid)
if hostgroup.uuid not in host.hostgroups:
host.hostgroups.append(hostgroup.uuid)
else:
hostgroup.add_unknown_members(member)
# Make members unique
new_members = list(set(new_members))
# We find the id, we replace the names
hostgroup.replace_members(new_members)
def linkify_hostgroups_realms_hosts(self, realms, hosts, forced_realms_hostgroups=True):
# pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches
"""Link between an hostgroup and a realm is already done in the configuration parsing
function that defines and checks the default satellites, realms, hosts and hosts groups
consistency.
This function will only raise some alerts if hosts groups and hosts that are contained
do not belong the same realm !
:param realms: object Realms
:type realms: alignak.objects.realm.Realms
:param hosts: object Hosts
:type hosts: alignak.objects.host.Hosts
:return: None
"""
logger.debug("Hostgroups / hosts / realms relation")
for hostgroup in self:
hostgroup_realm_name = hostgroup.realm
if hostgroup.realm not in realms:
realm = realms.find_by_name(hostgroup.realm)
if not realm:
continue
hostgroup.realm = realm.uuid
else:
hostgroup_realm_name = realms[hostgroup.realm].get_name()
logger.debug("- hg: %s in the realm: %s ",
hostgroup.get_name(),
hostgroup_realm_name + (" (*)" if hostgroup.got_default_realm else ''))
hostgroup_hosts_errors = []
hostgroup_new_realm_name = None
hostgroup_new_realm_failed = False
for host_uuid in hostgroup:
if host_uuid not in hosts:
continue
host = hosts[host_uuid]
host_realm_name = host.realm
if host.realm not in realms:
host_realm = realms.find_by_name(host.realm)
if not host_realm:
# Host realm is unknown, an error will be raised elsewhere!
continue
else:
host_realm_name = realms[host.realm].get_name()
logger.debug(" host %s is in the realm: %s",
host.get_name(),
host_realm_name + (" (*)" if host.got_default_realm else ''))
if host.got_default_realm:
# If the host got a default realm it means that no realm is specifically
# declared for this host. Thus it can inherit its realm from the one of its
# hostgroup :)
logger.debug("- apply the realm %s to the host %s from a hostgroup rule (%s)",
hostgroup_realm_name, host.get_name(), hostgroup.get_name())
host.realm = hostgroup.realm
else:
# If the host has a specifically declared realm then it must be the same
# as its hostgroup's one!
if host.realm != hostgroup.realm:
# If the hostgroup had a specified realm
if not hostgroup.got_default_realm:
# raise an error !
hostgroup.add_error(
"host %s (realm: %s) is not in the same realm than its "
"hostgroup %s (realm: %s)"
% (host.get_name(), host_realm_name,
hostgroup.get_name(), hostgroup_realm_name))
else:
# The hosts group had no realm set, it got the default All realm
if forced_realms_hostgroups:
# Temporary log an error...
hostgroup_hosts_errors.append(
"host %s (realm: %s) is not in the same realm than its "
"hostgroup %s (realm: %s)"
% (host.get_name(), host_realm_name,
hostgroup.get_name(), hostgroup_realm_name))
if not hostgroup_new_realm_name or \
hostgroup_new_realm_name == host_realm_name:
# Potential new host group realm
hostgroup_new_realm_name = host_realm_name
else:
# It still exists a candidate realm for the hostgroup,
# raise an error !
hostgroup.add_error("got the default realm but it has some "
"hosts that are from different realms: "
"%s and %s. The defined realm "
"cannot be adjusted!"
% (hostgroup_new_realm_name,
host_realm_name))
hostgroup_new_realm_failed = True
break
else:
# I tolerate some hosts from different realms in a hostgroup
# that is in the default realm
# Temporary log an error...
hostgroup_hosts_errors.append(
"host %s (realm: %s) is not in the same realm as its "
"hostgroup %s (realm: %s)"
% (host.get_name(), host_realm_name,
hostgroup.get_name(), hostgroup_realm_name))
if not forced_realms_hostgroups:
for error in hostgroup_hosts_errors:
# hostgroup.add_warning(error)
logger.info(error)
else:
if hostgroup_new_realm_name is None:
# Do not change the hostgroup realm, it is not possible,
# so raise the host individual errors!
for error in hostgroup_hosts_errors:
hostgroup.add_error(error)
elif hostgroup_new_realm_name:
if not hostgroup_new_realm_failed:
# Change the hostgroup realm to suit its hosts
hostgroup.add_warning("hostgroup %s gets the realm of its hosts: %s"
% (hostgroup.get_name(), hostgroup_new_realm_name))
hostgroup_new_realm = realms.find_by_name(hostgroup_new_realm_name)
hostgroup.realm = hostgroup_new_realm.uuid
def explode(self):
"""
Fill members with hostgroup_members
:return: None
"""
# We do not want a same hostgroup to be exploded again and again
# so we tag it
for tmp_hg in list(self.items.values()):
tmp_hg.already_exploded = False
for hostgroup in list(self.items.values()):
if hostgroup.already_exploded:
continue
# get_hosts_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for tmp_hg in list(self.items.values()):
tmp_hg.rec_tag = False
hostgroup.get_hosts_by_explosion(self)
# We clean the tags
for tmp_hg in list(self.items.values()):
if hasattr(tmp_hg, 'rec_tag'):
del tmp_hg.rec_tag
del tmp_hg.already_exploded
|
class Hostgroups(Itemgroups):
'''
Class to manage list of Hostgroup
Hostgroups is used to regroup all Hostgroup
'''
def add_member(self, host_name, hostgroup_name):
'''Add a host string to a hostgroup member
if the host group does not exist, create it
:param host_name: host name
:type host_name: str
:param hostgroup_name:hostgroup name
:type hostgroup_name: str
:return: None
'''
pass
def get_members_of_group(self, gname):
'''Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of the hosts in the group
:rtype: list[alignak.objects.host.Host]
'''
pass
def linkify(self, hosts=None, realms=None, forced_realms_hostgroups=True):
'''Link hostgroups with hosts and realms
:param hosts: all Hosts
:type hosts: alignak.objects.host.Hosts
:param realms: all Realms
:type realms: alignak.objects.realm.Realms
:return: None
'''
pass
def linkify_hostgroups_hosts(self, hosts):
'''For each hostgroup, search the ids of its hosts
and replace the names with the found identifiers
:param hosts: object Hosts
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
def linkify_hostgroups_realms_hosts(self, realms, hosts, forced_realms_hostgroups=True):
'''The link between a hostgroup and a realm is already done in the configuration parsing
function that defines and checks the default satellites, realms, hosts and hosts groups
consistency.
This function only raises alerts if the hosts groups and the hosts they contain
do not belong to the same realm!
:param realms: object Realms
:type realms: alignak.objects.realm.Realms
:param hosts: object Hosts
:type hosts: alignak.objects.host.Hosts
:return: None
'''
pass
def explode(self):
'''
Fill members with hostgroup_members
:return: None
'''
pass
| 7 | 7 | 37 | 3 | 22 | 12 | 7 | 0.59 | 1 | 3 | 1 | 0 | 6 | 0 | 6 | 52 | 234 | 25 | 132 | 29 | 125 | 78 | 97 | 29 | 90 | 21 | 3 | 7 | 40 |
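linkify_hostgroups_hosts() above treats a '*' member as 'every known host', skips blank entries, collects unknown names and deduplicates the result; a compact equivalent over plain names (the function is an illustration, not the project's API):

def resolve_members(members, known_hosts):
    """Expand '*' to all hosts, drop blanks, split knowns from unknowns."""
    resolved, unknown = set(), []
    for member in (m.strip() for m in members):
        if not member:
            continue            # void entry, skip it
        if member == '*':
            resolved.update(known_hosts)
        elif member in known_hosts:
            resolved.add(member)
        else:
            unknown.append(member)
    return resolved, unknown

resolved, unknown = resolve_members(['www1', '*', 'ghost', ''], {'www1', 'www2'})
assert resolved == {'www1', 'www2'} and unknown == ['ghost']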
4,120 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_properties_override.py
|
tests.test_properties_override.TestPropertyOverride
|
class TestPropertyOverride(AlignakTest):
def setUp(self):
super(TestPropertyOverride, self).setUp()
self.setup_with_file('cfg/cfg_property_override.cfg')
assert self.conf_is_correct
self._sched = self._scheduler
def test_service_property_override(self):
""" Property override """
svc1 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01", "srv-svc")
svc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02", "srv-svc")
svc1proc1 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01", "proc proc1")
svc1proc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01", "proc proc2")
svc2proc1 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02", "proc proc1")
svc2proc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02", "proc proc2")
tp24x7 = self._arbiter.conf.timeperiods.find_by_name("24x7")
tp_none = self._arbiter.conf.timeperiods.find_by_name("none")
tptest = self._arbiter.conf.timeperiods.find_by_name("testperiod")
cgtest = self._arbiter.conf.contactgroups.find_by_name("test_contact")
cgadm = self._arbiter.conf.contactgroups.find_by_name("admins")
cmdsvc = self._arbiter.conf.commands.find_by_name("check_service")
cmdtest = self._arbiter.conf.commands.find_by_name("dummy_command")
svc12 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01", "srv-svc2")
svc22 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02", "srv-svc2")
# Checks we got the objects we need
assert svc1 is not None
assert svc2 is not None
assert svc1proc1 is not None
assert svc1proc2 is not None
assert svc2proc1 is not None
assert svc2proc2 is not None
assert tp24x7 is not None
assert tptest is not None
assert cgtest is not None
assert cgadm is not None
assert cmdsvc is not None
assert cmdtest is not None
assert svc12 is not None
assert svc22 is not None
# Check non overriden properies value
for svc in (svc1, svc1proc1, svc1proc2, svc2proc1, svc12):
assert ["test_contact"] == svc.contact_groups
assert self._arbiter.conf.timeperiods[tp24x7.uuid].get_name() == \
self._arbiter.conf.timeperiods[svc.maintenance_period].get_name()
assert 1 == svc.retry_interval
assert self._arbiter.conf.commands[cmdsvc.uuid] is \
self._arbiter.conf.commands[svc.check_command.command.uuid]
# The list may not be in this order!
# assert ["w", "u", "x", "c", "r", "f", "s"] == svc.notification_options
assert 7 == len(svc.notification_options)
assert 'x' in svc.notification_options
assert 'f' in svc.notification_options
assert 'u' in svc.notification_options
assert 'r' in svc.notification_options
assert 's' in svc.notification_options
assert 'w' in svc.notification_options
assert 'c' in svc.notification_options
assert True is svc.notifications_enabled
# Check overriden properies value
for svc in (svc2, svc2proc2, svc22):
assert ["admins"] == svc.contact_groups
assert self._arbiter.conf.timeperiods[tptest.uuid].get_name() == \
self._arbiter.conf.timeperiods[svc.maintenance_period].get_name()
assert 3 == svc.retry_interval
assert self._arbiter.conf.commands[cmdtest.uuid] is \
self._arbiter.conf.commands[svc.check_command.command.uuid]
assert ["c","r"] == svc.notification_options
assert False is svc.notifications_enabled
|
class TestPropertyOverride(AlignakTest):
def setUp(self):
pass
def test_service_property_override(self):
''' Property override '''
pass
| 3 | 1 | 35 | 2 | 30 | 3 | 2 | 0.1 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 57 | 72 | 5 | 61 | 20 | 58 | 6 | 57 | 20 | 54 | 3 | 2 | 1 | 4 |
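The seven membership assertions on notification_options in the test above work around the list ordering; comparing as sets is an equivalent, order-insensitive check (a test-style sketch, not the project's code):

def assert_same_options(actual, expected):
    """Order-insensitive comparison of notification option letters."""
    assert set(actual) == set(expected), (actual, expected)

assert_same_options(['w', 'u', 'x', 'c', 'r', 'f', 's'],
                    ['c', 'f', 'r', 's', 'u', 'w', 'x'])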
4,121 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/log.py
|
alignak.log.CollectorHandler
|
class CollectorHandler(Handler):
"""
This logging handler collects all the emitted logs in an inner list.
Note: this is only used for unit testing purposes
"""
def __init__(self):
Handler.__init__(self, logging.DEBUG)
self.collector = []
def emit(self, record):
try:
msg = self.format(record)
self.collector.append(msg)
except TypeError: # pragma: no cover, simple protection
self.handleError(record)
|
class CollectorHandler(Handler):
'''
This logging handler collects all the emitted logs in an inner list.
Note: this is only used for unit testing purposes
'''
def __init__(self):
pass
def emit(self, record):
pass
| 3 | 1 | 5 | 0 | 5 | 1 | 2 | 0.5 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 22 | 17 | 3 | 10 | 5 | 7 | 5 | 10 | 5 | 7 | 2 | 3 | 1 | 3 |
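Typical unit-test usage of a handler like CollectorHandler above: attach it to a logger, emit, then assert on the collected lines. A self-contained sketch (the class is re-declared inline so the snippet runs on its own):

import logging
from logging import Handler

class CollectorHandler(Handler):
    # Same idea as the record above: keep formatted records in a list
    def __init__(self):
        Handler.__init__(self, logging.DEBUG)
        self.collector = []

    def emit(self, record):
        self.collector.append(self.format(record))

logger = logging.getLogger('collector-demo')
logger.setLevel(logging.DEBUG)
handler = CollectorHandler()
handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
logger.addHandler(handler)

logger.warning("something happened")
assert handler.collector == ["WARNING - something happened"]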
4,122 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_macros_resolver.py
|
tests.test_macros_resolver.TestMacroResolverWithEnv
|
class TestMacroResolverWithEnv(MacroResolverTester, AlignakTest):
"""Test without enabled environment macros"""
def setUp(self):
super(TestMacroResolverWithEnv, self).setUp()
# Do not provide environment file to use the default one
self.setup_file = 'cfg/cfg_macroresolver.cfg'
self.setup_with_file(self.setup_file, dispatching=True)
assert self.conf_is_correct
# Get an initialized macro resolver object
self.mr = MacroResolver()
self.mr.init(self._scheduler.pushed_conf)
# Default prefix
assert self.mr.env_prefix == 'ALIGNAK_'
|
class TestMacroResolverWithEnv(MacroResolverTester, AlignakTest):
'''Test without enabled environment macros'''
def setUp(self):
pass
| 2 | 1 | 14 | 3 | 8 | 3 | 1 | 0.44 | 2 | 2 | 1 | 0 | 1 | 2 | 1 | 75 | 17 | 4 | 9 | 4 | 7 | 4 | 9 | 4 | 7 | 1 | 2 | 0 | 1 |
4,123 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/modules/inner_retention.py
|
alignak.modules.inner_retention.InnerRetention
|
class InnerRetention(BaseModule):
"""
This class is used to store/restore retention data
"""
def __init__(self, mod_conf): # pylint: disable=too-many-branches
"""Module initialization
mod_conf is a dictionary that contains:
- all the variables declared in the module configuration
- a 'properties' value that is the module properties as defined globally in this file
If some environment variables exist, they will take precedence over the
configuration parameters:
'ALIGNAK_RETENTION_DIR'
the retention files directory
'ALIGNAK_RETENTION_FILE'
the retention unique file for the current scheduler
:param mod_conf: module configuration file as a dictionary
"""
BaseModule.__init__(self, mod_conf)
# pylint: disable=global-statement
global logger
logger = logging.getLogger('alignak.module.%s' % self.alias)
logger.setLevel(getattr(mod_conf, 'log_level', logging.INFO))
logger.debug("inner properties: %s", self.__dict__)
logger.debug("received configuration: %s", mod_conf.__dict__)
logger.info("loaded by the %s '%s'", self.my_daemon.type, self.my_daemon.name)
stats_host = getattr(mod_conf, 'statsd_host', 'localhost')
stats_port = int(getattr(mod_conf, 'statsd_port', '8125'))
stats_prefix = getattr(mod_conf, 'statsd_prefix', 'alignak')
statsd_enabled = (getattr(mod_conf, 'statsd_enabled', '0') != '0')
if isinstance(getattr(mod_conf, 'statsd_enabled', '0'), bool):
statsd_enabled = getattr(mod_conf, 'statsd_enabled')
graphite_enabled = (getattr(mod_conf, 'graphite_enabled', '0') != '0')
if isinstance(getattr(mod_conf, 'graphite_enabled', '0'), bool):
graphite_enabled = getattr(mod_conf, 'graphite_enabled')
logger.info("StatsD configuration: %s:%s, prefix: %s, enabled: %s, graphite: %s",
stats_host, stats_port, stats_prefix, statsd_enabled, graphite_enabled)
self.statsmgr = Stats()
# Configure our Stats manager
if not graphite_enabled:
self.statsmgr.register(self.alias, 'module',
statsd_host=stats_host, statsd_port=stats_port,
statsd_prefix=stats_prefix, statsd_enabled=statsd_enabled)
else:
self.statsmgr.connect(self.alias, 'module',
host=stats_host, port=stats_port,
prefix=stats_prefix, enabled=True)
self.enabled = getattr(mod_conf, 'enabled', '0') != '0'
if isinstance(getattr(mod_conf, 'enabled', '0'), bool):
self.enabled = getattr(mod_conf, 'enabled')
if not self.enabled:
logger.warning("inner retention module is loaded but is not enabled.")
return
self.retention_dir = getattr(mod_conf, 'retention_dir', None)
if os.getenv('ALIGNAK_RETENTION_DIR', None):
self.retention_dir = os.getenv('ALIGNAK_RETENTION_DIR')
if not self.retention_dir:
self.retention_dir = os.path.join(tempfile.gettempdir(), 'alignak', 'retention')
if '%s' in self.retention_dir:
self.retention_dir = self.retention_dir % self.my_daemon.name
self.retention_file = getattr(mod_conf, 'retention_file', None)
logger.info("inner retention module, retention file: %s", self.retention_file)
if os.getenv('ALIGNAK_RETENTION_FILE', None):
self.retention_file = os.getenv('ALIGNAK_RETENTION_FILE')
if self.retention_file is None:
self.retention_file = os.path.join(self.retention_dir, 'alignak-retention-%s.json')
if '%s' in self.retention_file:
self.retention_file = self.retention_file % self.my_daemon.name
if self.retention_dir and not os.path.isdir(self.retention_dir):
logger.info("The configured state retention directory (%s) does not exist. "
"Trying to create....", self.retention_dir)
try:
os.makedirs(self.retention_dir)
logger.warning("Retention directory created: %s.", self.retention_dir)
except OSError as exp:
logger.error("Directory creation failed because: %s", str(exp))
self.retention_dir = '/tmp'
logger.info("The retention directory is set to: %s", self.retention_dir)
logger.info("inner retention module, enabled: %s, retention dir: %s, retention file: %s",
self.enabled, self.retention_dir, self.retention_file)
        if not self.retention_file:
            logger.info("The retention file is not set. The module will "
                        "create a file for each host in the retention directory.")
else:
logger.info("The retention file is set as a unique scheduler file. "
"The module will create one file for each scheduler "
"with all hosts in the retention directory.")
# Common functions
def do_loop_turn(self):
"""This function is called/used when you need a module with
a loop function (and use the parameter 'external': True)
"""
logger.info("[Inner Retention] In loop")
time.sleep(1)
def hook_load_retention(self, scheduler): # pylint: disable=too-many-locals, too-many-branches
"""Load retention data from a file
:param scheduler: scheduler instance of alignak
:type scheduler: alignak.scheduler.Scheduler
        :return: True on success, False on failure, None if not applicable
"""
if not self.enabled:
logger.warning("Alignak retention module is not enabled."
"Loading objects state is not possible.")
return None
if self.retention_file and not os.path.isfile(self.retention_file):
logger.info("The configured state retention file (%s) does not exist. "
"Loading objects state is not available.", self.retention_file)
return None
if self.retention_dir and not os.path.isdir(self.retention_dir):
logger.info("The configured state retention directory (%s) does not exist. "
"Loading objects state is not available.", self.retention_dir)
return None
all_data = {'hosts': {}, 'services': {}}
retention_files = []
if self.retention_file:
retention_files = [self.retention_file]
else:
if self.retention_dir:
for root, _, walk_files in os.walk(self.retention_dir, followlinks=True):
for found_file in walk_files:
if not re.search(r"\.json$", found_file):
continue
retention_files.append(os.path.join(root, found_file))
logger.debug("Loading retention files: %s ", retention_files)
if retention_files:
logger.info("Loading retention data from %d files", len(retention_files))
start_time = time.time()
for retention_file in retention_files:
# Get data from the retention files
try:
logger.debug('Loading data from: %s', retention_file)
with open(retention_file, "r") as fd:
response = json.load(fd)
if not isinstance(response, list):
response = [response]
                    # Does it look like a list of host dictionaries?
if isinstance(response[0], dict) and 'name' in response[0]:
logger.debug('Loaded: %s', response)
else:
logger.warning("Supposed retention file %s is not correctly encoded! "
"It is probably not a retention file.", retention_file)
continue
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, should never happen...
logger.warning("Error when loading retention data from %s", retention_file)
logger.exception(exp)
else:
for host in response:
hostname = host['name']
service_key = 'services'
if 'retention_services' in host:
service_key = 'retention_services'
if service_key in host:
for service in host[service_key]:
all_data['services'][(host['name'], service)] = \
host[service_key][service]
all_data['hosts'][hostname] = host
logger.debug('- loaded: %s', host)
try:
logger.info('%d hosts loaded from retention', len(all_data['hosts']))
self.statsmgr.counter('retention-load.hosts', len(all_data['hosts']))
logger.info('%d services loaded from retention', len(all_data['services']))
self.statsmgr.counter('retention-load.services', len(all_data['services']))
self.statsmgr.timer('retention-load.time', time.time() - start_time)
# Restore the scheduler objects
scheduler.restore_retention_data(all_data)
logger.info("Retention data loaded in %s seconds", (time.time() - start_time))
except Exception as exp: # pylint: disable=broad-except
logger.warning("Retention load failed: %s", exp)
logger.exception(exp)
return False
return True
def hook_save_retention(self, scheduler):
"""Save retention data to a Json formated file
:param scheduler: scheduler instance of alignak
:type scheduler: alignak.scheduler.Scheduler
        :return: True on success, False on failure, None if not applicable
"""
if not self.enabled:
logger.warning("Alignak retention module is not enabled."
"Saving objects state is not possible.")
return None
try:
start_time = time.time()
# Get retention data from the scheduler
data_to_save = scheduler.get_retention_data()
if not data_to_save:
logger.warning("Alignak retention data to save are not containing any information.")
return None
            # Move the services data into their respective host dictionary.
            # The Alignak scheduler does not merge the services into the host dictionary!
for host_name in data_to_save['hosts']:
data_to_save['hosts'][host_name]['services'] = {}
data_to_save['hosts'][host_name]['name'] = host_name
for host_name, service_description in data_to_save['services']:
data_to_save['hosts'][host_name]['services'][service_description] = \
data_to_save['services'][(host_name, service_description)]
try:
if not self.retention_file:
logger.info('Saving retention data to: %s', self.retention_dir)
for host_name in data_to_save['hosts']:
                        # One file per host (self.retention_file is empty in this branch)
                        file_name = os.path.join(self.retention_dir,
                                                 "%s.json" % host_name)
with open(file_name, "w") as fd:
fd.write(json.dumps(data_to_save['hosts'][host_name],
indent=2, separators=(',', ':'),
default=default_serialize,
sort_keys=True))
logger.debug('- saved: %s', file_name)
logger.info('Saved')
else:
logger.info('Saving retention data to: %s', self.retention_file)
with open(self.retention_file, "w") as fd:
fd.write(json.dumps(data_to_save['hosts'],
indent=2, separators=(',', ':'),
default=default_serialize,
sort_keys=True))
logger.info('Saved')
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, should never happen...
logger.warning("Error when saving retention data to %s", self.retention_file)
logger.exception(exp)
logger.info('%d hosts saved in retention', len(data_to_save['hosts']))
self.statsmgr.counter('retention-save.hosts', len(data_to_save['hosts']))
logger.info('%d services saved in retention', len(data_to_save['services']))
self.statsmgr.counter('retention-save.services', len(data_to_save['services']))
self.statsmgr.timer('retention-save.time', time.time() - start_time)
logger.info("Retention data saved in %s seconds", (time.time() - start_time))
except Exception as exp: # pylint: disable=broad-except
self.enabled = False
logger.warning("Retention save failed: %s", exp)
logger.exception(exp)
return False
return True
|
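For reference, a hedged sketch of the input that hook_load_retention above expects:
each retention file holds a host dictionary (or a list of them) carrying a 'name'
key, with the services nested under 'services' or 'retention_services'. All values
below are made up for illustration:

# Illustrative retention file content (a single host dictionary is also accepted)
[
    {
        "name": "srv001",
        "state": "UP",
        "services": {
            "disks": {"state": "OK"},
            "ping": {"state": "OK"}
        }
    }
]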
class InnerRetention(BaseModule):
'''
This class is used to store/restore retention data
'''
def __init__(self, mod_conf):
'''Module initialization
mod_conf is a dictionary that contains:
- all the variables declared in the module configuration
- a 'properties' value that is the module properties as defined globally in this file
        If some environment variables exist, they will take precedence over the
        configuration parameters:
'ALIGNAK_RETENTION_DIR'
the retention files directory
'ALIGNAK_RETENTION_FILE'
the retention unique file for the current scheduler
:param mod_conf: module configuration file as a dictionary
'''
pass
def do_loop_turn(self):
'''This function is called/used when you need a module with
a loop function (and use the parameter 'external': True)
'''
pass
def hook_load_retention(self, scheduler):
'''Load retention data from a file
:param scheduler: scheduler instance of alignak
:type scheduler: alignak.scheduler.Scheduler
        :return: True on success, False on failure, None if not applicable
'''
pass
def hook_save_retention(self, scheduler):
        '''Save retention data to a JSON formatted file
:param scheduler: scheduler instance of alignak
:type scheduler: alignak.scheduler.Scheduler
        :return: True on success, False on failure, None if not applicable
'''
pass
| 5 | 5 | 66 | 9 | 49 | 10 | 11 | 0.23 | 1 | 8 | 1 | 0 | 4 | 5 | 4 | 24 | 272 | 38 | 195 | 37 | 189 | 45 | 165 | 31 | 159 | 19 | 2 | 5 | 44 |
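A minimal standalone sketch of the path resolution precedence described in the
__init__ docstring above (environment variables win over the configuration
parameters). The resolve_retention_paths helper and its config dictionary are
illustrative assumptions, not part of the module:

import os
import tempfile

def resolve_retention_paths(config, daemon_name):
    # Environment variables take precedence over configuration parameters
    retention_dir = os.getenv('ALIGNAK_RETENTION_DIR') or config.get('retention_dir')
    if not retention_dir:
        retention_dir = os.path.join(tempfile.gettempdir(), 'alignak', 'retention')
    if '%s' in retention_dir:
        retention_dir = retention_dir % daemon_name

    retention_file = os.getenv('ALIGNAK_RETENTION_FILE') or config.get('retention_file')
    if retention_file is None:
        retention_file = os.path.join(retention_dir, 'alignak-retention-%s.json')
    if '%s' in retention_file:
        retention_file = retention_file % daemon_name
    return retention_dir, retention_file

# Example: the environment variable overrides the configured directory
os.environ['ALIGNAK_RETENTION_DIR'] = '/var/lib/alignak/retention'
print(resolve_retention_paths({'retention_dir': '/tmp/retention'}, 'scheduler-master'))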
4,124 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/modulesmanager.py
|
alignak.modulesmanager.ModulesManager
|
class ModulesManager(object):
"""This class is used to manage modules and call callback"""
def __init__(self, daemon):
"""
:param daemon: the daemon for which modules manager is created
:type daemon: alignak.Daemon
"""
self.daemon = daemon
self.daemon_type = daemon.type
self.daemon_name = daemon.name
self.modules = {}
self.modules_assoc = []
self.instances = []
self.to_restart = []
# By default the modules configuration is correct and the
# warnings and errors lists are empty
self.configuration_is_correct = True
self.configuration_warnings = []
self.configuration_errors = []
logger.debug("Created a module manager for '%s'", self.daemon_name)
def set_daemon_name(self, daemon_name):
"""Set the daemon name of the daemon which this manager is attached to
and propagate this daemon name to our managed modules
:param daemon_name:
:return:
"""
self.daemon_name = daemon_name
for instance in self.instances:
instance.set_loaded_into(daemon_name)
def load_and_init(self, modules):
"""Import, instantiate & "init" the modules we manage
:param modules: list of the managed modules
:return: True if no errors
"""
self.load(modules)
self.get_instances()
return len(self.configuration_errors) == 0
def load(self, modules):
"""Load Python modules and check their usability
:param modules: list of the modules that must be loaded
:return:
"""
self.modules_assoc = []
for module in modules:
if not module.enabled:
logger.info("Module %s is declared but not enabled", module.name)
# Store in our modules list but do not try to load
# Probably someone else will load this module later...
self.modules[module.uuid] = module
continue
logger.info("Importing Python module '%s' for %s...", module.python_name, module.name)
try:
python_module = importlib.import_module(module.python_name)
# Check existing module properties
# Todo: check all mandatory properties
if not hasattr(python_module, 'properties'): # pragma: no cover
self.configuration_errors.append("Module %s is missing a 'properties' "
"dictionary" % module.python_name)
raise AttributeError
logger.info("Module properties: %s", getattr(python_module, 'properties'))
# Check existing module get_instance method
                if not hasattr(python_module, 'get_instance') or \
                        not callable(getattr(python_module,
                                             'get_instance')):  # pragma: no cover
self.configuration_errors.append("Module %s is missing a 'get_instance' "
"function" % module.python_name)
raise AttributeError
self.modules_assoc.append((module, python_module))
logger.info("Imported '%s' for %s", module.python_name, module.name)
except ImportError as exp: # pragma: no cover, simple protection
self.configuration_errors.append("Module %s (%s) can't be loaded, Python "
"importation error: %s" % (module.python_name,
module.name,
str(exp)))
except AttributeError: # pragma: no cover, simple protection
self.configuration_errors.append("Module %s (%s) can't be loaded, "
"module configuration" % (module.python_name,
module.name))
else:
logger.info("Loaded Python module '%s' (%s)", module.python_name, module.name)
def try_instance_init(self, instance, late_start=False):
"""Try to "initialize" the given module instance.
:param instance: instance to init
:type instance: object
:param late_start: If late_start, don't look for last_init_try
:type late_start: bool
:return: True on successful init. False if instance init method raised any Exception.
:rtype: bool
"""
try:
instance.init_try += 1
# Maybe it's a retry
if not late_start and instance.init_try > 1:
                # Do not retry too frequently, to avoid a busy retry loop
if instance.last_init_try > time.time() - MODULE_INIT_PERIOD:
logger.info("Too early to retry initialization, retry period is %d seconds",
MODULE_INIT_PERIOD)
# logger.info("%s / %s", instance.last_init_try, time.time())
return False
instance.last_init_try = time.time()
logger.info("Trying to initialize module: %s", instance.name)
# If it's an external module, create/update Queues()
if instance.is_external:
instance.create_queues(self.daemon.sync_manager)
# The module instance init function says if initialization is ok
if not instance.init():
logger.warning("Module %s initialisation failed.", instance.name)
return False
logger.info("Module %s is initialized.", instance.name)
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, simple protection
msg = "The module instance %s raised an exception " \
"on initialization: %s, I remove it!" % (instance.name, str(exp))
self.configuration_errors.append(msg)
logger.error(msg)
logger.exception(exp)
return False
return True
def clear_instances(self, instances=None):
"""Request to "remove" the given instances list or all if not provided
:param instances: instances to remove (all instances are removed if None)
:type instances:
:return: None
"""
if instances is None:
instances = self.instances[:] # have to make a copy of the list
for instance in instances:
self.remove_instance(instance)
def set_to_restart(self, instance):
"""Put an instance to the restart queue
:param instance: instance to restart
:type instance: object
:return: None
"""
self.to_restart.append(instance)
if instance.is_external:
            instance.process = None
def get_instances(self):
"""Create, init and then returns the list of module instances that the caller needs.
This method is called once the Python modules are loaded to initialize the modules.
        If an instance can't be created or initialized, a log is emitted and that
        instance is skipped. The previous modules instance(s), if any, are all cleaned.
:return: module instances list
:rtype: list
"""
self.clear_instances()
for (alignak_module, python_module) in self.modules_assoc:
alignak_module.properties = python_module.properties.copy()
alignak_module.my_daemon = self.daemon
logger.info("Alignak starting module '%s'", alignak_module.get_name())
# if alignak_module.modules:
# modules = []
# for module_uuid in alignak_module.modules:
# if module_uuid in self.modules:
# modules.append(self.modules[module_uuid])
# alignak_module.modules = modules
logger.debug("Module '%s', parameters: %s",
alignak_module.get_name(), alignak_module.__dict__)
try:
instance = python_module.get_instance(alignak_module)
if not isinstance(instance, BaseModule): # pragma: no cover, simple protection
self.configuration_errors.append("Module %s instance is not a "
"BaseModule instance: %s"
% (alignak_module.get_name(),
type(instance)))
raise AttributeError
# pragma: no cover, simple protection
except Exception as exp: # pylint: disable=broad-except
logger.error("The module %s raised an exception on loading, I remove it!",
alignak_module.get_name())
logger.exception("Exception: %s", exp)
self.configuration_errors.append("The module %s raised an exception on "
"loading: %s, I remove it!"
% (alignak_module.get_name(), str(exp)))
else:
# Give the module the data to which daemon/module it is loaded into
instance.set_loaded_into(self.daemon.name)
self.instances.append(instance)
for instance in self.instances:
# External instances are not initialized now, but only when they are started
if not instance.is_external and not self.try_instance_init(instance):
                # If the init failed, we put it in the restart queue
logger.warning("The module '%s' failed to initialize, "
"I will try to restart it later", instance.name)
self.set_to_restart(instance)
return self.instances
def start_external_instances(self, late_start=False):
"""Launch external instances that are load correctly
:param late_start: If late_start, don't look for last_init_try
:type late_start: bool
:return: None
"""
for instance in [i for i in self.instances if i.is_external]:
            # But maybe the init failed, so bypass these ones for now
if not self.try_instance_init(instance, late_start=late_start):
logger.warning("The module '%s' failed to init, I will try to restart it later",
instance.name)
self.set_to_restart(instance)
continue
# ok, init succeed
logger.info("Starting external module %s", instance.name)
instance.start()
def remove_instance(self, instance):
"""Request to cleanly remove the given instance.
If instance is external also shutdown it cleanly
:param instance: instance to remove
:type instance: object
:return: None
"""
# External instances need to be close before (process + queues)
if instance.is_external:
logger.info("Request external process to stop for %s", instance.name)
instance.stop_process()
logger.info("External process stopped.")
instance.clear_queues(self.daemon.sync_manager)
        # Then stop tracking it
self.instances.remove(instance)
def check_alive_instances(self):
"""Check alive instances.
If not, log error and try to restart it
:return: None
"""
# Only for external
for instance in self.instances:
if instance in self.to_restart:
continue
if instance.is_external and instance.process and not instance.process.is_alive():
logger.error("The external module %s died unexpectedly!", instance.name)
logger.info("Setting the module %s to restart", instance.name)
# We clean its queues, they are no more useful
instance.clear_queues(self.daemon.sync_manager)
self.set_to_restart(instance)
# Ok, no need to look at queue size now
continue
            # Now look at the queue size. If it is above the defined value, the module
            # may have a serious problem, so bail out. Not a perfect solution, more a watchdog.
# If max_queue_size is 0, don't check this
if self.daemon.max_queue_size == 0:
continue
# Check for module queue size
queue_size = 0
try:
queue_size = instance.to_q.qsize()
except Exception: # pylint: disable=broad-except
pass
if queue_size > self.daemon.max_queue_size:
logger.error("The module %s has a too important queue size (%s > %s max)!",
instance.name, queue_size, self.daemon.max_queue_size)
logger.info("Setting the module %s to restart", instance.name)
# We clean its queues, they are no more useful
instance.clear_queues(self.daemon.sync_manager)
self.set_to_restart(instance)
def try_to_restart_deads(self):
"""Try to reinit and restart dead instances
:return: None
"""
to_restart = self.to_restart[:]
del self.to_restart[:]
for instance in to_restart:
logger.warning("Trying to restart module: %s", instance.name)
if self.try_instance_init(instance):
logger.warning("Restarting %s...", instance.name)
# Because it is a restart, clean the module inner process reference
instance.process = None
# If it's an external module, it will start the process
instance.start()
# Ok it's good now :)
else:
# Will retry later...
self.to_restart.append(instance)
def get_internal_instances(self, phase=None):
"""Get a list of internal instances (in a specific phase)
        If phase is None, return all internal instances whatever the phase
:param phase: phase to filter (never used)
:type phase:
:return: internal instances list
:rtype: list
"""
if phase is None:
return [instance for instance in self.instances if not instance.is_external]
return [instance for instance in self.instances
if not instance.is_external and phase in instance.phases and
instance not in self.to_restart]
def get_external_instances(self, phase=None):
"""Get a list of external instances (in a specific phase)
        If phase is None, return all external instances whatever the phase
:param phase: phase to filter (never used)
:type phase:
:return: external instances list
:rtype: list
"""
if phase is None:
return [instance for instance in self.instances if instance.is_external]
return [instance for instance in self.instances
if instance.is_external and phase in instance.phases and
instance not in self.to_restart]
def stop_all(self):
"""Stop all module instances
:return: None
"""
logger.info('Shutting down modules...')
# Ask internal to quit if they can
for instance in self.get_internal_instances():
            if hasattr(instance, 'quit') and callable(instance.quit):
instance.quit()
self.clear_instances([instance for instance in self.instances if instance.is_external])
|
class ModulesManager(object):
'''This class is used to manage modules and call callback'''
def __init__(self, daemon):
'''
:param daemon: the daemon for which modules manager is created
:type daemon: alignak.Daemon
'''
pass
def set_daemon_name(self, daemon_name):
'''Set the daemon name of the daemon which this manager is attached to
and propagate this daemon name to our managed modules
:param daemon_name:
:return:
'''
pass
def load_and_init(self, modules):
'''Import, instantiate & "init" the modules we manage
:param modules: list of the managed modules
:return: True if no errors
'''
pass
    def load(self, modules):
'''Load Python modules and check their usability
:param modules: list of the modules that must be loaded
:return:
'''
pass
def try_instance_init(self, instance, late_start=False):
'''Try to "initialize" the given module instance.
:param instance: instance to init
:type instance: object
:param late_start: If late_start, don't look for last_init_try
:type late_start: bool
:return: True on successful init. False if instance init method raised any Exception.
:rtype: bool
'''
pass
def clear_instances(self, instances=None):
'''Request to "remove" the given instances list or all if not provided
:param instances: instances to remove (all instances are removed if None)
:type instances:
:return: None
'''
pass
def set_to_restart(self, instance):
'''Put an instance to the restart queue
:param instance: instance to restart
:type instance: object
:return: None
'''
pass
def get_instances(self):
'''Create, init and then returns the list of module instances that the caller needs.
This method is called once the Python modules are loaded to initialize the modules.
        If an instance can't be created or initialized, a log is emitted and that
        instance is skipped. The previous modules instance(s), if any, are all cleaned.
:return: module instances list
:rtype: list
'''
pass
def start_external_instances(self, late_start=False):
        '''Launch external instances that were loaded correctly
:param late_start: If late_start, don't look for last_init_try
:type late_start: bool
:return: None
'''
pass
def remove_instance(self, instance):
'''Request to cleanly remove the given instance.
If instance is external also shutdown it cleanly
:param instance: instance to remove
:type instance: object
:return: None
'''
pass
def check_alive_instances(self):
'''Check alive instances.
If not, log error and try to restart it
:return: None
'''
pass
def try_to_restart_deads(self):
'''Try to reinit and restart dead instances
:return: None
'''
pass
def get_internal_instances(self, phase=None):
'''Get a list of internal instances (in a specific phase)
        If phase is None, return all internal instances whatever the phase
:param phase: phase to filter (never used)
:type phase:
:return: internal instances list
:rtype: list
'''
pass
def get_external_instances(self, phase=None):
'''Get a list of external instances (in a specific phase)
If phase is None, return all external instances whtever the phase
:param phase: phase to filter (never used)
:type phase:
:return: external instances list
:rtype: list
'''
pass
def stop_all(self):
'''Stop all module instances
:return: None
'''
pass
| 16 | 16 | 23 | 3 | 12 | 8 | 3 | 0.68 | 1 | 6 | 1 | 0 | 15 | 10 | 15 | 15 | 364 | 58 | 188 | 42 | 172 | 127 | 162 | 39 | 146 | 7 | 1 | 3 | 50 |
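The load() method above relies on two conventions for a module to be usable: the
imported Python module must expose a 'properties' dictionary and a callable
'get_instance' function. A minimal sketch of that validation, with an assumed
helper name (import_alignak_module is not part of the class):

import importlib

def import_alignak_module(python_name):
    # Import the Python module and validate the two required attributes,
    # mirroring the checks performed in ModulesManager.load()
    python_module = importlib.import_module(python_name)
    if not hasattr(python_module, 'properties'):
        raise AttributeError("Module %s is missing a 'properties' dictionary"
                             % python_name)
    if not callable(getattr(python_module, 'get_instance', None)):
        raise AttributeError("Module %s is missing a 'get_instance' function"
                             % python_name)
    return python_module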
4,125 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_macros_resolver.py
|
tests.test_macros_resolver.TestMacroResolverWithoutEnv
|
class TestMacroResolverWithoutEnv(MacroResolverTester, AlignakTest):
"""Test without enabled environment macros"""
def setUp(self):
super(TestMacroResolverWithoutEnv, self).setUp()
        # Do not provide an environment file so that the default one is used
self.setup_file = 'cfg/cfg_macroresolver_environment.cfg'
self.setup_with_file(self.setup_file, dispatching=True)
assert self.conf_is_correct
# Get an initialized macro resolver object
self.mr = MacroResolver()
self.mr.init(self._scheduler.pushed_conf)
assert self.mr.env_prefix == 'NAGIOS_'
|
class TestMacroResolverWithoutEnv(MacroResolverTester, AlignakTest):
'''Test without enabled environment macros'''
def setUp(self):
pass
| 2 | 1 | 12 | 2 | 8 | 2 | 1 | 0.33 | 2 | 2 | 1 | 0 | 1 | 2 | 1 | 75 | 15 | 3 | 9 | 4 | 7 | 3 | 9 | 4 | 7 | 1 | 2 | 0 | 1 |
4,126 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_maintenance_period.py
|
tests.test_maintenance_period.TestMaintenancePeriod
|
class TestMaintenancePeriod(AlignakTest):
"""
This class tests the maintenance_period
"""
def setUp(self):
super(TestMaintenancePeriod, self).setUp()
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
def test_maintenance_period_host(self):
"""Test a host enter in maintenance_period
:return: None
"""
# Get the host
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = []
# Not any downtime yet !
assert host.downtimes == {}
# Make the host be UP
self.scheduler_loop(1, [[host, 0, 'UP']])
# we create a new timeperiod from now -5 minutes to now + 55 minutes
begin = datetime.now() - timedelta(minutes=5)
end = datetime.now() + timedelta(minutes=55)
h_begin = format(begin, '%H:%M')
        if format(begin, '%H') == '23' and int(format(begin, '%M')) >= 55:
h_begin = '00:00'
h_end = format(end, '%H:%M')
end = end - timedelta(seconds=int(format(end, '%S')))
timestamp_end = int(time.mktime(end.timetuple()))
data = {
'timeperiod_name': 'maintenance',
'sunday': h_begin + '-' + h_end,
'monday': h_begin + '-' + h_end,
'tuesday': h_begin + '-' + h_end,
'wednesday': h_begin + '-' + h_end,
'thursday': h_begin + '-' + h_end,
'friday': h_begin + '-' + h_end,
'saturday': h_begin + '-' + h_end
}
timeperiod = Timeperiod(data)
timeperiod.explode()
self._scheduler.timeperiods[timeperiod.uuid] = timeperiod
host.maintenance_period = timeperiod.uuid
# Make the host be UP again
self.scheduler_loop(1, [[host, 0, 'UP']])
assert 1 == len(host.downtimes)
# The host is still in a downtime period
assert host.in_scheduled_downtime
downtime = list(host.downtimes.values())[0]
assert downtime.fixed
assert downtime.is_in_effect
assert not downtime.can_be_deleted
assert downtime.end_time == timestamp_end
assert downtime.comment == 'This downtime was automatically scheduled by Alignak because ' \
'of a maintenance period.'
|
class TestMaintenancePeriod(AlignakTest):
'''
This class tests the maintenance_period
'''
def setUp(self):
pass
def test_maintenance_period_host(self):
        '''Test a host entering its maintenance_period
:return: None
'''
pass
| 3 | 2 | 32 | 4 | 23 | 6 | 2 | 0.3 | 1 | 6 | 1 | 0 | 2 | 0 | 2 | 57 | 69 | 9 | 46 | 12 | 43 | 14 | 35 | 12 | 32 | 2 | 2 | 1 | 3 |
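The test above builds a timeperiod that applies the same 'HH:MM-HH:MM' range to
every day of the week. A small illustrative helper producing the same data
dictionary (the helper name and defaults are assumptions, not part of the suite):

from datetime import datetime, timedelta

def maintenance_window_data(name='maintenance', start_min=-5, end_min=55):
    # Build a 'HH:MM-HH:MM' range applied to all seven days of the week
    begin = datetime.now() + timedelta(minutes=start_min)
    end = datetime.now() + timedelta(minutes=end_min)
    time_range = '%s-%s' % (begin.strftime('%H:%M'), end.strftime('%H:%M'))
    data = {'timeperiod_name': name}
    for day in ('sunday', 'monday', 'tuesday', 'wednesday',
                'thursday', 'friday', 'saturday'):
        data[day] = time_range
    return data

print(maintenance_window_data())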
4,127 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/log.py
|
alignak.log.ColorStreamHandler
|
class ColorStreamHandler(StreamHandler):
"""
This logging handler provides colored logs when logs are emitted to a tty.
"""
def emit(self, record):
colors = {'DEBUG': 'cyan', 'INFO': 'green',
'WARNING': 'yellow', 'CRITICAL': 'magenta', 'ERROR': 'red'}
msg = self.format(record)
try:
            if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
cprint(msg, colors[record.levelname])
else:
print(msg)
except UnicodeEncodeError: # pragma: no cover, simple protection
print(msg.encode('ascii', 'ignore'))
except IOError: # pragma: no cover, simple protection
# May happen when process are closing
pass
except TypeError: # pragma: no cover, simple protection
self.handleError(record)
|
class ColorStreamHandler(StreamHandler):
'''
This logging handler provides colored logs when logs are emitted to a tty.
'''
def emit(self, record):
pass
| 2 | 1 | 16 | 0 | 15 | 4 | 5 | 0.44 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 26 | 20 | 0 | 16 | 4 | 14 | 7 | 14 | 4 | 12 | 5 | 4 | 2 | 5 |
4,128 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_metrics.py
|
tests.test_metrics.FakeCarbonServer
|
class FakeCarbonServer(threading.Thread):
def __init__(self, host='127.0.0.1', port=0):
super(FakeCarbonServer, self).__init__()
        self.daemon = True  # setDaemon() is deprecated in recent Python versions
self.port = port
self.cli_socks = [] # will retain the client socks here
sock = self.sock = socket.socket()
sock.settimeout(1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
if not port:
self.port = sock.getsockname()[1]
sock.listen(0)
self.running = True
print("Starting fake carbon server on %d" % port)
self.start()
def stop(self):
self.running = False
self.sock.close()
def run(self):
while self.running:
try:
sock, addr = self.sock.accept()
except socket.error:
pass
else:
# so that we won't block indefinitely in handle_connection
# in case the client doesn't send anything :
sock.settimeout(3)
self.cli_socks.append(sock)
self.handle_connection(sock)
self.cli_socks.remove(sock)
def handle_connection(self, sock):
data = sock.recv(4096)
print("Fake carbon received: %s" % pickle.dumps(data))
sock.close()
|
class FakeCarbonServer(threading.Thread):
def __init__(self, host='127.0.0.1', port=0):
pass
def stop(self):
pass
def run(self):
pass
def handle_connection(self, sock):
pass
| 5 | 0 | 9 | 0 | 8 | 1 | 2 | 0.09 | 1 | 2 | 0 | 0 | 4 | 4 | 4 | 29 | 39 | 3 | 34 | 11 | 29 | 3 | 34 | 11 | 29 | 3 | 1 | 2 | 7 |
4,129 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_metrics.py
|
tests.test_metrics.TestMetricsSetup
|
class TestMetricsSetup(AlignakTest):
"""
This class tests the inner metrics module set-up
"""
def setUp(self):
super(TestMetricsSetup, self).setUp()
# Log at DEBUG level
self.set_unit_tests_logger_level()
self.clear_logs()
if os.path.exists('/tmp/alignak-metrics.log'):
os.remove('/tmp/alignak-metrics.log')
# Create a fake server
self.fake_carbon = FakeCarbonServer(port=2004)
# # Create a fake server
# self.fake_carbon = FakeInfluxDBServer(port=8086)
#
def tearDown(self):
super(TestMetricsSetup, self).tearDown()
self.fake_carbon.stop()
self.fake_carbon.join()
def test_default_is_disabled(self):
""" Test that default configuration is metrics disabled
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
        # The default configuration does not enable the module
assert self._scheduler.pushed_conf.process_performance_data is True
assert self._scheduler.pushed_conf.host_perfdata_command is None
assert self._scheduler.pushed_conf.service_perfdata_command is None
assert self._scheduler.pushed_conf.host_perfdata_file == ''
assert self._scheduler.pushed_conf.service_perfdata_file == ''
assert self._broker_daemon.modules == []
def test_inner_module_enabled(self):
""" Test that inner metrics module may be enabled
If configuration parameters host_perfdata_command or service_perfdata_command
are declared and not empty and if process_performance_data is set, the inner metrics
module is configured and enabled to push performance data to the Alignak configured
StatsD / Graphite interface.
:return: None
"""
self.setup_with_file('cfg/cfg_metrics.cfg',
dispatching=True)
# Specific configuration enables the module
assert self._scheduler.pushed_conf.process_performance_data is True
assert self._scheduler.pushed_conf.host_perfdata_file == 'go-hosts'
assert self._scheduler.pushed_conf.service_perfdata_file == 'go-services'
assert 1 == len(self._broker_daemon.modules)
self.show_logs()
def test_inner_module_configuration(self):
""" Test that inner metrics module may be configured in Alignak configuration
With this configuration, hosts/services cache is enabled and tested. Broks for
unknown hosts/services are ignored.
:return: None
"""
with requests_mock.mock() as mr:
mr.get("http://localhost:8086/ping",
json={
"results":[{
"statement_id":0, "version":"1.7.2"
}]
}, status_code=204, headers={"x-influxdb-version": "1.7.2"})
mr.get("http://localhost:8086/query?q=SHOW+DATABASES&db=alignak",
json={
"results":[{
"statement_id":0, "series":[
{"name":"databases","columns":["name"],"values":[["_internal"]]}
]
}]
})
mr.get("http://localhost:8086/query?q=SHOW+DATABASES&db=alignak",
json={"results": [{"statement_id": 0}]})
mr.post("http://localhost:8086/query?q=CREATE+DATABASE+%22alignak%22&db=alignak",
json={"results":[{ "statement_id":0 }]})
mr.post("http://localhost:8086/query?q=CREATE+RETENTION+POLICY+%22alignak%22+ON+%22alignak%22+DURATION+1y+REPLICATION+1+SHARD+DURATION+0s&db=alignak",
json={"results":[{ "statement_id":0 }]})
mr.post("http://localhost:8086/write?db=alignak", status_code=204,
json={"results":[{ "statement_id":0 }]})
self.setup_with_file('cfg/cfg_metrics.cfg',
'cfg/inner_metrics/alignak.ini',
dispatching=True)
# Specific configuration enables the module
assert self._scheduler.pushed_conf.process_performance_data is True
assert self._scheduler.pushed_conf.host_perfdata_file == 'go-hosts'
assert self._scheduler.pushed_conf.service_perfdata_file == 'go-services'
assert 1 == len(self._broker_daemon.modules)
self.show_logs()
# The declared module instance
my_module = self._broker_daemon.modules[0]
print(my_module)
# Generic stuff
assert my_module.python_name == 'alignak.modules.inner_metrics'
assert my_module.type == 'metrics'
# assert my_module.alias == 'inner-metrics'
assert my_module.enabled is True
# Specific stuff - the content of the configuration parameters
# When the module is configured in Alignak configuration, it does not exist!
# assert my_module.host_perfdata_file == 'go-hosts'
# assert my_module.service_perfdata_file == 'go-services'
assert my_module.output_file == '/tmp/alignak-metrics.log'
self.clear_logs()
# Module is not yet initialized, let's do it in place of the daemon.
# Create the modules manager for a daemon type
self.modules_manager = ModulesManager(self._broker_daemon)
        # Load and initialize the modules:
# - load python module
# - get module properties and instances
self.modules_manager.load_and_init([my_module])
self.show_logs()
# self.assert_log_match(
# "Targets configuration: graphite: True, influxdb: True, "
# "file: /tmp/alignak-metrics.log", 10)
#
self.assert_log_match(
"targets configuration: graphite: True, influxdb: True, "
"file: /tmp/alignak-metrics.log", 9)
self.assert_log_match(
"Storing metrics in an output file is configured. Do not forget "
"to regularly clean this file to avoid important disk usage!", 10)
index = 22
self.assert_log_match("Trying to initialize module: inner-metrics", index)
index += 1
self.assert_log_match("testing storage to /tmp/alignak-metrics.log ...", index)
index += 1
self.assert_log_match("Ok", index)
index += 1
self.assert_log_match("testing connection to InfluxDB localhost:8086 ...", index)
index += 1
self.assert_log_match("connected, InfluxDB version 1.7.2", index)
index += 1
self.assert_log_match("testing connection to Graphite localhost:2004 ...", index)
index += 1
self.assert_log_match("Ok", index)
index += 1
self.assert_log_match("creating database alignak...", index)
index += 1
# self.assert_log_match("creating database retention policy: alignak - 1y - 1...", 32)
# self.assert_log_match("Ok", 33)
self.assert_log_match("Module inner-metrics is initialized.", index)
index += 1
# Module is an internal one (no external process) in the broker daemon modules manager
my_module = self._broker_daemon.modules_manager.instances[0]
assert my_module.is_external is False
# Known hosts/services cache is empty
assert my_module.hosts_cache == {}
assert my_module.services_cache == {}
# File output - we still got a metric for the connection test!
assert os.path.exists('/tmp/alignak-metrics.log')
with open('/tmp/alignak-metrics.log') as f:
lines = f.readlines()
first_line = False
for line in lines:
assert 3 == len(line.split(';'))
                if not first_line:
                    first_line = True
                    line = line.strip()
                    metric = line.split(';')
                    assert metric[0] == metric[2]
                    assert metric[1] == 'connection-test'
print(line)
# Some metrics were stored
assert 2 == len(lines)
# When the broker daemon receives a Brok, it is propagated to the module
# Host check result
self.clear_logs()
hcr = {
"host_name": "srv001",
"last_time_unreachable": 0,
"last_problem_id": 0,
"passive_check": False,
"retry_interval": 1,
"last_event_id": 0,
"problem_has_been_acknowledged": False,
"command_name": "pm-check_linux_host_alive",
"last_state": "UP",
"latency": 0.2317881584,
"last_state_type": "HARD",
"last_hard_state_change": 1444427108,
"last_time_up": 0,
"percent_state_change": 0.0,
"state": "DOWN",
"last_chk": 1444427104,
"last_state_id": 0,
"end_time": 0,
"timeout": 0,
"current_event_id": 10,
"execution_time": 3.1496069431000002,
"start_time": 0,
"return_code": 2,
"state_type": "SOFT",
"output": "CRITICAL - Plugin timed out after 10 seconds",
"in_checking": True,
"early_timeout": 0,
"in_scheduled_downtime": False,
"attempt": 0,
"state_type_id": 1,
"acknowledgement_type": 1,
"last_state_change": 1444427108.040841,
"last_time_down": 1444427108,
"instance_id": 0,
"long_output": "",
"current_problem_id": 0,
"check_interval": 5,
"state_id": 2,
"has_been_checked": 1,
"perf_data": "uptime=1200;rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
}
b = Brok({'data': hcr, 'type': 'host_check_result'})
self._broker_daemon.manage_brok(b)
self.show_logs()
self.assert_log_count(2)
self.assert_log_match("host check result: srv001", 0)
self.assert_log_match("received host check result for an unknown host: srv001", 1)
# Service check result
self.clear_logs()
scr = {
"host_name": "srv001",
"service_description": "ping",
"command_name": "ping",
"attempt": 1,
"execution_time": 3.1496069431000002,
"latency": 0.2317881584,
"return_code": 2,
"state": "OK",
"state_type": "HARD",
"state_id": 0,
"state_type_id": 1,
"output": "PING OK - Packet loss = 0%, RTA = 0.05 ms",
"long_output": "Long output ...",
"perf_data": "rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0",
"passive_check": False,
"problem_has_been_acknowledged": False,
"acknowledgement_type": 1,
"in_scheduled_downtime": False,
"last_chk": 1473597375,
"last_state_change": 1444427108.147903,
"last_state_id": 0,
"last_state": "UNKNOWN",
"last_state_type": "HARD",
"last_hard_state_change": 0.0,
"last_time_unknown": 0,
"last_time_unreachable": 0,
"last_time_critical": 1473597376,
"last_time_warning": 0,
"last_time_ok": 0,
"retry_interval": 2,
"percent_state_change": 4.1,
"check_interval": 5,
"in_checking": False,
"early_timeout": 0,
"instance_id": "3ac88dd0c1c04b37a5d181622e93b5bc",
"current_event_id": 1,
"last_event_id": 0,
"current_problem_id": 1,
"last_problem_id": 0,
"timeout": 0,
"has_been_checked": 1,
"start_time": 0,
"end_time": 0
}
b = Brok({'data': scr, 'type': 'service_check_result'})
self._broker_daemon.manage_brok(b)
self.show_logs()
self.assert_log_count(2)
self.assert_log_match("service check result: srv001/ping", 0)
self.assert_log_match("received service check result for an unknown host", 1)
# Initial host status
self.clear_logs()
hcr = {
"host_name": "srv001",
}
b = Brok({'data': hcr, 'type': 'initial_host_status'})
self._broker_daemon.manage_brok(b)
self.show_logs()
# The module inner cache stored the host
assert 'srv001' in my_module.hosts_cache
assert my_module.hosts_cache['srv001'] == {'realm_name': 'All'}
assert my_module.services_cache == {}
# Initial service status
self.clear_logs()
hcr = {
"host_name": "srv001",
"service_description": "disks"
}
b = Brok({'data': hcr, 'type': 'initial_service_status'})
self._broker_daemon.manage_brok(b)
self.show_logs()
# The module inner cache stored the host
assert 'srv001' in my_module.hosts_cache
assert my_module.hosts_cache['srv001'] == {'realm_name': 'All'}
assert 'srv001/disks' in my_module.services_cache
assert my_module.services_cache['srv001/disks'] == {}
# Now the host srv001 is known in the module, let's raise an host brok
# Host check result
self.clear_logs()
hcr = {
"host_name": "srv001",
"last_time_unreachable": 0,
"last_problem_id": 0,
"passive_check": False,
"retry_interval": 1,
"last_event_id": 0,
"problem_has_been_acknowledged": False,
"command_name": "pm-check_linux_host_alive",
"last_state": "UP",
"latency": 0.2317881584,
"last_state_type": "HARD",
"last_hard_state_change": 1444427108,
"last_time_up": 0,
"percent_state_change": 0.0,
"state": "DOWN",
"last_chk": 1444427104,
"last_state_id": 0,
"end_time": 0,
"timeout": 0,
"current_event_id": 10,
"execution_time": 3.1496069431000002,
"start_time": 0,
"return_code": 2,
"state_type": "SOFT",
"output": "CRITICAL - Plugin timed out after 10 seconds",
"in_checking": True,
"early_timeout": 0,
"in_scheduled_downtime": False,
"attempt": 0,
"state_type_id": 1,
"acknowledgement_type": 1,
"last_state_change": 1444427108.040841,
"last_time_down": 1444427108,
"instance_id": 0,
"long_output": "",
"current_problem_id": 0,
"check_interval": 5,
"state_id": 2,
"has_been_checked": 1,
"perf_data": "uptime=1200 rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
}
b = Brok({'data': hcr, 'type': 'host_check_result'})
self._broker_daemon.manage_brok(b)
self.show_logs()
self.assert_log_count(9)
self.assert_log_match("host check result: srv001", 0)
self.assert_log_match("service: host_check, metric: ", 1)
self.assert_log_match("service: host_check, metric: ", 2)
self.assert_log_match("service: host_check, metric: ", 3)
self.assert_log_match("Metrics: host_check - ", 4)
self.assert_log_match("Metrics data: ", 5)
self.assert_log_match("Flushing 1 metrics to Graphite/carbon", 6)
self.assert_log_match("Flushing 1 metrics to InfluxDB", 7)
self.assert_log_match("Storing 1 metrics to /tmp/alignak-metrics.log", 8)
# Service check result
self.clear_logs()
scr = {
"host_name": "srv001",
"service_description": "disks",
"last_time_unreachable": 0,
"last_problem_id": 0,
"passive_check": False,
"retry_interval": 1,
"last_event_id": 0,
"problem_has_been_acknowledged": False,
"command_name": "pm-check_linux_disks",
"last_state": "UP",
"latency": 0.2317881584,
"last_state_type": "HARD",
"last_hard_state_change": 1444427108,
"last_time_up": 0,
"percent_state_change": 0.0,
"state": "OK",
"last_chk": 1444427104,
"last_state_id": 0,
"end_time": 0,
"timeout": 0,
"current_event_id": 10,
"execution_time": 3.1496069431000002,
"start_time": 0,
"return_code": 2,
"state_type": "SOFT",
"output": "DISK OK - free space: / 3326 MB (56%); / 15272 MB (77%);/boot 68 MB (69%);/home 69357 MB (27%);/var/log 819 MB (84%);",
"in_checking": True,
"early_timeout": 0,
"in_scheduled_downtime": False,
"attempt": 0,
"state_type_id": 1,
"acknowledgement_type": 1,
"last_state_change": 1444427108.040841,
"last_time_down": 1444427108,
"instance_id": 0,
"long_output": "",
"current_problem_id": 0,
"check_interval": 5,
"state_id": 2,
"has_been_checked": 1,
"perf_data": "/=2643MB;5948;5958;0;5968 /boot=68MB;88;93;0;98 /home=69357MB;253404;253409;0;253414 /var/log=818MB;970;975;0;980"
}
b = Brok({'data': scr, 'type': 'service_check_result'})
self._broker_daemon.manage_brok(b)
self.show_logs()
self.assert_log_count(10)
self.assert_log_match("service check result: srv001/disks", 0)
self.assert_log_match(re.escape("service: disks, metric: "), 1)
self.assert_log_match(re.escape("service: disks, metric: "), 2)
self.assert_log_match(re.escape("service: disks, metric: "), 3)
self.assert_log_match(re.escape("service: disks, metric: "), 4)
self.assert_log_match(re.escape("Metrics: disks - "), 5)
self.assert_log_match("Metrics data: ", 6)
self.assert_log_match("Flushing 1 metrics to Graphite/carbon", 7)
self.assert_log_match("Flushing 1 metrics to InfluxDB", 8)
self.assert_log_match("Storing 1 metrics to /tmp/alignak-metrics.log", 9)
# Metrics count
# File output
assert os.path.exists('/tmp/alignak-metrics.log')
with open('/tmp/alignak-metrics.log') as f:
lines = f.readlines()
first_line = False
for line in lines:
line = line.strip()
assert 3 == len(line.split(';'))
print(line)
if not first_line:
first_line = True
metric = line.split(';')
assert metric[0] == metric[2]
assert metric[1] == 'connection-test'
# Some metrics were stored!
assert 33 == len(lines)
|
class TestMetricsSetup(AlignakTest):
'''
This class tests the inner metrics module set-up
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_default_is_disabled(self):
        ''' Test that the default configuration leaves metrics disabled
:return: None
'''
pass
def test_inner_module_enabled(self):
''' Test that inner metrics module may be enabled
If configuration parameters host_perfdata_command or service_perfdata_command
are declared and not empty and if process_performance_data is set, the inner metrics
module is configured and enabled to push performance data to the Alignak configured
StatsD / Graphite interface.
:return: None
'''
pass
def test_inner_module_configuration(self):
''' Test that inner metrics module may be configured in Alignak configuration
With this configuration, hosts/services cache is enabled and tested. Broks for
unknown hosts/services are ignored.
:return: None
'''
pass
| 6 | 4 | 96 | 11 | 73 | 12 | 2 | 0.17 | 1 | 4 | 3 | 0 | 5 | 2 | 5 | 60 | 486 | 59 | 366 | 19 | 360 | 61 | 169 | 17 | 163 | 5 | 2 | 4 | 11 |
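The checks above feed Nagios-style perf_data strings such as
"rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0" into the module. A
simplified parsing sketch; the real module relies on Alignak's own performance
data parser, so this helper is only illustrative:

def parse_perf_data(perf_data):
    # Split a perf_data string into {metric_name: [value, warn, crit, min, max]};
    # quoted metric names and other edge cases are deliberately ignored here
    metrics = {}
    for item in perf_data.split():
        if '=' not in item:
            continue
        name, values = item.split('=', 1)
        metrics[name] = values.split(';')
    return metrics

print(parse_perf_data("rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"))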
4,130 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_modules.py
|
tests.test_modules.TestModules
|
class TestModules(AlignakTest):
"""
This class contains the tests for the modules
"""
def setUp(self):
super(TestModules, self).setUp()
self.set_unit_tests_logger_level('INFO')
def test_module_loading(self):
""" Test arbiter, broker, ... detecting configured modules
:return:
"""
self.setup_with_file('cfg/cfg_default_with_modules.cfg',
'cfg/default_with_modules/alignak.ini',
dispatching=True)
assert self.conf_is_correct
self.show_configuration_logs()
self.show_logs()
# arbiter modules
modules = [m.module_alias for m in self._arbiter.link_to_myself.modules]
assert modules == ['Example']
modules = [m.name for m in self._arbiter.link_to_myself.modules]
assert modules == ['Example']
# broker modules
modules = [m.module_alias for m in self._broker_daemon.modules]
assert modules == ['Example']
modules = [m.name for m in self._broker_daemon.modules]
assert modules == ['Example']
# # The only existing poller module is Example declared in the configuration
# modules = [m.module_alias for m in self.pollers['poller-master'].modules]
# assert modules == ['Example']
#
# # The only existing receiver module is Example declared in the configuration
# modules = [m.module_alias for m in self.receivers['receiver-master'].modules]
# assert modules == ['Example']
#
# # The only existing reactionner module is Example declared in the configuration
# modules = [m.module_alias for m in self.reactionners['reactionner-master'].modules]
# assert modules == ['Example']
        # Scheduler modules created
modules = [m.module_alias for m in self._scheduler_daemon.modules]
assert modules == ['Example']
modules = [m.name for m in self._scheduler_daemon.modules]
assert modules == ['Example']
self.show_logs()
# Loading module logs
self.assert_any_log_match(re.escape(
u"Importing Python module 'alignak_module_example' for Example..."
))
self.assert_any_log_match(re.escape(
u"Imported 'alignak_module_example' for Example"
))
self.assert_any_log_match(re.escape(
u"Give an instance of alignak_module_example for alias: Example"
))
self.assert_any_log_match(re.escape(
u"I correctly loaded my modules: [Example]"
))
def test_arbiter_configuration_module(self):
""" Test arbiter configuration loading
:return:
"""
self.setup_with_file('./cfg/modules/arbiter_modules.cfg')
assert self.conf_is_correct
self.show_configuration_logs()
self.show_logs()
        # The only arbiter module is Example declared in the configuration
modules = [m.module_alias for m in self._arbiter.link_to_myself.modules]
assert modules == ['Example']
def test_module_on_module(self):
""" No module configuration for modules
Check that the feature is detected as disabled
:return:
"""
self.setup_with_file('cfg/modules/alignak_module_with_submodules.cfg',
dispatching=True)
assert self.conf_is_correct
self.show_configuration_logs()
self.show_logs()
# All modules
modules = [m.module_alias for m in self._arbiter.conf.modules]
print("All modules: %s", modules)
# Confirm that the extra modules are present in the configuration
assert 'composite' in modules
assert 'part-A' in modules
assert 'part-B' in modules
test_module = [m for m in self._arbiter.conf.modules if m.module_alias == 'composite'][0]
part_a_module = [m for m in self._arbiter.conf.modules if m.module_alias == 'part-A'][0]
part_b_module = [m for m in self._arbiter.conf.modules if m.module_alias == 'part-B'][0]
print("Composite module: %s" % test_module.__dict__)
assert test_module.modules == [part_a_module, part_b_module]
# Find the new broker
broker_master = [b for b in self._arbiter.conf.brokers if b.get_name() == 'broker-master'][0]
print("Broker master modules: %s" % broker_master.modules)
modules = [m.module_alias for m in broker_master.modules]
assert modules == ['Example']
new_broker = [b for b in self._arbiter.conf.brokers if b.get_name() == 'broker-master-second'][0]
print("Broker second modules: %s" % new_broker.modules)
modules = [m.module_alias for m in new_broker.modules]
assert modules == ['composite']
composite_module = new_broker.modules[0]
print("Sub modules: %s" % composite_module.modules)
assert len(composite_module.modules) == 2
# Scheduler modules created
modules = [m.module_alias for m in self._scheduler_daemon.modules]
assert modules == ['Example', 'inner-retention']
modules = [m.name for m in self._scheduler_daemon.modules]
assert modules == ['Example', 'inner-retention']
self._broker_daemon.modules_manager.stop_all()
self.show_logs()
def test_modulemanager_1(self):
""" Module manager manages its modules - old form
Test if the module manager manages correctly all the modules
:return:
"""
self.setup_with_file('cfg/cfg_default_with_modules.cfg',
'cfg/default_with_modules/alignak.ini',
dispatching=True)
assert self.conf_is_correct
# Create an Alignak module
mod = Module({
'module_alias': 'mod-example',
'module_types': 'example',
'python_name': 'alignak_module_example'
})
self.run_modulemanager(mod)
def test_modulemanager_2(self):
""" Module manager manages its modules - new form
Test if the module manager manages correctly all the modules
:return:
"""
self.setup_with_file('cfg/cfg_default_with_modules.cfg',
'cfg/default_with_modules/alignak.ini',
dispatching=True)
assert self.conf_is_correct
# Create an Alignak module
mod = Module({
'name': 'mod-example',
'type': 'example',
'python_name': 'alignak_module_example'
})
self.run_modulemanager(mod)
def run_modulemanager(self, mod):
# Force the daemon SyncManager to None for unit tests!
self._broker_daemon.sync_manager = None
# Create the modules manager for a daemon type
self.modules_manager = ModulesManager(self._broker_daemon)
        # Load and initialize the modules:
# - load python module
# - get module properties and instances
self.modules_manager.load_and_init([mod])
# Loading module logs
self.assert_any_log_match(re.escape(
"Importing Python module 'alignak_module_example' for mod-example..."
))
self.assert_any_log_match(re.escape(
"Imported 'alignak_module_example' for mod-example"
))
self.assert_any_log_match(re.escape(
"Give an instance of alignak_module_example for alias: mod-example"
))
self.clear_logs()
my_module = self.modules_manager.instances[0]
assert my_module.is_external
# Get list of not external modules
assert [] == self.modules_manager.get_internal_instances()
for phase in ['configuration', 'late_configuration', 'running', 'retention']:
assert [] == self.modules_manager.get_internal_instances(phase)
# Get list of external modules
assert [my_module] == self.modules_manager.get_external_instances()
for phase in ['configuration', 'late_configuration', 'running', 'retention']:
assert [my_module] == self.modules_manager.get_external_instances(phase)
# Start external modules
self.modules_manager.start_external_instances()
self.show_logs()
# Starting external module logs
idx = 0
self.assert_log_match(re.escape(
"Trying to initialize module: mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Test - Example in init"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Initialization of the example module"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Module mod-example is initialized"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Starting external module mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Starting external process for module mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"mod-example is now started (pid="
), idx)
idx += 1
self.assert_log_count(7)
# Check alive
assert my_module.process is not None
assert my_module.process.is_alive()
self.clear_logs()
# Check the alive module instances...
self.modules_manager.check_alive_instances()
# Try to restart the dead modules, if any
self.modules_manager.try_to_restart_deads()
self.assert_log_count(0)
# Kill the external module (normal stop is .stop_process)
self.clear_logs()
my_module.kill()
idx = 0
self.assert_log_match(re.escape(
"Killing external module "
), idx)
idx += 1
self.show_logs()
# self.assert_log_match(re.escape(
# "mod-example is still living "
# ), idx)
# idx += 1
# Specific case because sometimes the module is not killed within the expected 10s time
normal_kill = True
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for handler in logger_.handlers:
if not isinstance(handler, CollectorHandler):
continue
regex = re.compile('mod-example is still living')
log_num = 0
found = False
for log in handler.collector:
if idx == log_num:
if regex.search(log):
idx += 1
normal_kill = False
break
log_num += 1
break
self.assert_log_match(re.escape(
"External module killed"
), idx)
idx += 1
self.assert_log_count(idx)
# The module is dead (not normally stopped...) so this module inner
# process reference is not None!
assert my_module.process is not None
# Check the alive module instances...
self.clear_logs()
idx = 0
self.modules_manager.check_alive_instances()
self.show_logs()
self.assert_log_match(re.escape(
"The external module mod-example died unexpectedly!"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Setting the module mod-example to restart"
), idx)
self.assert_log_count(2)
idx += 1
if normal_kill:
# Try to restart the dead modules, if any
# Indeed, it's too early, so it won't do it
self.clear_logs()
idx = 0
print("try init: %d" % my_module.init_try)
self.modules_manager.try_to_restart_deads()
self.show_logs()
self.assert_log_match(re.escape(
"Trying to restart module: mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Too early to retry initialization, retry period is %d seconds" % MODULE_INIT_PERIOD
), idx)
idx += 1
self.assert_log_count(2)
# Here the module instance is still dead
assert not my_module.process.is_alive()
# Wait for a minimum delay
time.sleep(MODULE_INIT_PERIOD + 1)
# my_module.last_init_try = -5
self.clear_logs()
self.modules_manager.check_alive_instances()
self.show_logs()
self.assert_log_count(0)
# Try to restart the dead modules, if any
# Now it is time...
self.clear_logs()
idx = 0
self.modules_manager.try_to_restart_deads()
self.show_logs()
self.assert_log_match(re.escape(
"Trying to restart module: mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Trying to initialize module: mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Test - Example in init"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Initialization of the example module"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Module mod-example is initialized"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Restarting mod-example..."
), idx)
idx += 1
self.assert_log_match(re.escape(
"Starting external process for module mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"mod-example is now started (pid="
), idx)
idx += 1
self.assert_log_count(8)
# Here the module instance should be alive again
assert my_module.process.is_alive()
# No more module to restart...
assert [] == self.modules_manager.to_restart
# And we clear all now
self.clear_logs()
idx = 0
self.modules_manager.stop_all()
self.show_logs()
self.assert_log_match(re.escape(
"Shutting down modules..."
), idx)
idx += 1
self.assert_log_match(re.escape(
"Request external process to stop for mod-example"
), idx)
idx += 1
self.assert_log_match(re.escape(
"I'm stopping module 'mod-example'"
), idx)
idx += 1
self.assert_log_match(re.escape(
"Killing external module "
), idx)
idx += 1
# Specific case because sometimes the module is not killed within the expected 10s time
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for handler in logger_.handlers:
if not isinstance(handler, CollectorHandler):
continue
regex = re.compile('mod-example is still living')
log_num = 0
found = False
for log in handler.collector:
if idx == log_num:
if regex.search(log):
idx += 1
break
log_num += 1
break
self.assert_log_match(re.escape(
"External module killed"
), idx)
idx += 1
self.assert_log_match(re.escape(
"External process stopped."
), idx)
idx += 1
# self.assert_log_count(6)
def test_modulemanager_several_modules(self):
""" Module manager manages its modules
Test if the module manager manages correctly all the modules
Configured with several modules
:return:
"""
self.setup_with_file('cfg/cfg_default_with_modules.cfg',
'cfg/default_with_modules/alignak.ini',
dispatching=True)
assert self.conf_is_correct
# for mod in self._arbiter.conf.modules:
# print (mod.__dict__)
# Create an Alignak module
mod = Module({
'module_alias': 'mod-example',
'module_types': 'example',
'python_name': 'alignak_module_example',
'option1': 'foo',
'option2': 'bar',
'option3': 1
})
mod2 = Module({
'module_alias': 'mod-example-2',
'module_types': 'example',
'python_name': 'alignak_module_example',
'option1': 'faa',
'option2': 'bor',
'option3': 1
})
# Force the daemon SyncManager to None for unit tests!
self._broker_daemon.sync_manager = None
# Create the modules manager for a daemon type
self.modules_manager = ModulesManager(self._broker_daemon)
print("Modules: %s" % self._broker_daemon.modules)
# Load an initialize the modules:
# - load python module
# - get module properties and instances
assert self.modules_manager.load_and_init([mod, mod2])
print("I correctly loaded my modules: [%s]" % ','.join([inst.name for inst in
self.modules_manager.instances]))
self.show_logs()
self.assert_any_log_match(re.escape(
"Importing Python module 'alignak_module_example' for mod-example..."
))
self.assert_any_log_match(re.escape(
"Imported 'alignak_module_example' for mod-example"
))
self.assert_any_log_match(re.escape(
"Loaded Python module 'alignak_module_example' (mod-example)"
))
self.assert_any_log_match(re.escape(
"Importing Python module 'alignak_module_example' for mod-example-2..."
))
self.assert_any_log_match(re.escape(
"Imported 'alignak_module_example' for mod-example-2"
))
self.assert_any_log_match(re.escape(
"Loaded Python module 'alignak_module_example' (mod-example-2)"
))
self.assert_any_log_match(re.escape(
"Give an instance of alignak_module_example for alias: mod-example"
))
self.assert_any_log_match(re.escape(
"configuration, foo, bar, 1"
))
self.assert_any_log_match(re.escape(
"Give an instance of alignak_module_example for alias: mod-example-2"
))
self.assert_any_log_match(re.escape(
"configuration, faa, bor, 1"
))
# Loading module logs
self.assert_any_log_match(re.escape(
"Importing Python module 'alignak_module_example' for mod-example..."
))
my_module = self.modules_manager.instances[0]
my_module2 = self.modules_manager.instances[1]
assert my_module.is_external
assert my_module2.is_external
# Get list of not external modules
assert [] == self.modules_manager.get_internal_instances()
for phase in ['configuration', 'late_configuration', 'running', 'retention']:
assert [] == self.modules_manager.get_internal_instances(phase)
# Get list of external modules
assert [my_module, my_module2] == self.modules_manager.get_external_instances()
for phase in ['configuration', 'late_configuration', 'running', 'retention']:
assert [my_module, my_module2] == self.modules_manager.get_external_instances(phase)
# Start external modules
self.modules_manager.start_external_instances()
self.modules_manager.start_external_instances()
# Starting external module logs
self.assert_any_log_match(re.escape(
"Starting external module mod-example"
))
self.assert_any_log_match(re.escape(
"Starting external process for module mod-example"
))
self.assert_any_log_match(re.escape(
"mod-example is now started (pid="
))
# Check alive
assert my_module.process is not None
assert my_module.process.is_alive()
assert my_module2.process is not None
assert my_module2.process.is_alive()
# Kill the external module (normal stop is .stop_process)
self.clear_logs()
print("Killing a module")
my_module.kill()
time.sleep(0.1)
self.show_logs()
# Stopping module logs
self.assert_any_log_match(re.escape(
"Killing external module "
))
self.assert_any_log_match(re.escape(
"External module killed"
))
# Should be dead (not normally stopped...) but we still know a process for this module!
assert my_module.process is not None
self.clear_logs()
print("Killing another module")
my_module2.kill()
time.sleep(0.1)
self.show_logs()
# Stopping module logs
self.assert_any_log_match(re.escape(
"Killing external module "
))
self.assert_any_log_match(re.escape(
"External module killed"
))
# Should be dead (not normally stopped...) but we still know a process for this module!
assert my_module2.process is not None
# Nothing special ...
self.clear_logs()
self.modules_manager.check_alive_instances()
# Try to restart the dead modules
print("Trying to restart dead modules")
# We lie on the last restart try time
my_module.last_init_try = time.time()
my_module2.last_init_try = time.time()
self.modules_manager.try_to_restart_deads()
self.show_logs()
# In fact it's too early, so it won't do it
# Here the module instances should still be dead
assert not my_module.process.is_alive()
assert not my_module2.process.is_alive()
# We lie on the last restart try time
my_module.last_init_try = 0
my_module2.last_init_try = 0
self.modules_manager.check_alive_instances()
self.modules_manager.try_to_restart_deads()
# Here the module instances should be alive again
assert my_module.process.is_alive()
assert my_module2.process.is_alive()
# Kill the module again
self.clear_logs()
my_module.kill()
self.show_logs()
time.sleep(0.2)
assert not my_module.process.is_alive()
# And we clear all now
self.modules_manager.stop_all()
# Stopping module logs
self.assert_any_log_match(re.escape(
"I'm stopping module "
))
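# --- Added illustration (not part of the original test) ---
# A minimal sketch of the internal/external split asserted earlier with
# get_internal_instances() / get_external_instances(), assuming each
# instance exposes 'is_external' and a 'phases' list (names hypothetical):
def split_instances(instances, phase=None):
    """Split module instances into (internal, external) lists."""
    internal, external = [], []
    for inst in instances:
        if phase is not None and phase not in getattr(inst, 'phases', []):
            continue
        (external if inst.is_external else internal).append(inst)
    return internal, external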
|
class TestModules(AlignakTest):
'''
This class contains the tests for the modules
'''
def setUp(self):
pass
def test_module_loading(self):
''' Test arbiter, broker, ... detecting configured modules
:return:
'''
pass
def test_arbiter_configuration_module(self):
''' Test arbiter configuration loading
:return:
'''
pass
def test_module_on_module(self):
''' No module configuration for modules
Check that the feature is detected as disabled
:return:
'''
pass
def test_modulemanager_1(self):
''' Module manager manages its modules - old form
Test if the module manager manages correctly all the modules
:return:
'''
pass
def test_modulemanager_2(self):
''' Module manager manages its modules - new form
Test if the module manager manages correctly all the modules
:return:
'''
pass
def run_modulemanager(self, mod):
pass
def test_modulemanager_several_modules(self):
''' Module manager manages its modules
Test if the module manager manages correctly all the modules
Configured with several modules
:return:
'''
pass
| 9 | 7 | 77 | 9 | 55 | 13 | 3 | 0.25 | 1 | 4 | 3 | 0 | 8 | 1 | 8 | 63 | 629 | 81 | 437 | 36 | 428 | 111 | 299 | 36 | 290 | 14 | 2 | 4 | 23 |
4,131 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_monitoring_logs.py
|
tests.test_monitoring_logs.TestMonitoringLogs
|
class TestMonitoringLogs(AlignakTest):
"""
This class tests the check_result brok
"""
def setUp(self):
super(TestMonitoringLogs, self).setUp()
def check(self, frozen_datetime, item, state_id, output, expected_logs):
"""
:param item: concerned item
:param state_id: state identifier
:param output: state text
:param expected_logs: expected monitoring logs
:return:
"""
# Clear monitoring events
self.clear_events()
self.scheduler_loop(1, [[item, state_id, output]])
# Time warp 1 second!
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
self.check_monitoring_events_log(expected_logs)
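# --- Added usage note ---
# expected_logs is a list of (level, message) tuples matched against the
# monitoring events log, e.g.:
#   self.check(frozen_datetime, host, 0, 'Host is UP',
#              [('info', 'ACTIVE HOST CHECK: test_host_0;UP;0;Host is UP')])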
def test_logs_hosts(self):
""" Test logs for active / passive checks for hosts
:return: None
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
dispatching=True)
assert self.conf_is_correct
self._scheduler.pushed_conf.log_initial_states = True
self._scheduler.pushed_conf.log_active_checks = True
self._scheduler.pushed_conf.log_passive_checks = True
host = self._scheduler.hosts.find_by_name("test_host_0")
# Set the notification interval to 5 minutes
host.notification_interval = 5
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = True
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
# Host active checks
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', u'ACTIVE HOST CHECK: test_host_0;UP;0;Host is UP')])
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', u'ACTIVE HOST CHECK: test_host_0;UP;1;Host is UP')])
# Host goes DOWN / SOFT
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'ACTIVE HOST CHECK: test_host_0;DOWN;1;Host is DOWN'),
('error', 'HOST ALERT: test_host_0;DOWN;SOFT;1;Host is DOWN'),
('error', 'HOST EVENT HANDLER: test_host_0;DOWN;SOFT;1;eventhandler'),
])
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'ACTIVE HOST CHECK: test_host_0;DOWN;1;Host is DOWN'),
('error', 'HOST ALERT: test_host_0;DOWN;SOFT;2;Host is DOWN'),
('error', 'HOST EVENT HANDLER: test_host_0;DOWN;SOFT;2;eventhandler')])
# Host goes DOWN / HARD
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'ACTIVE HOST CHECK: test_host_0;DOWN;2;Host is DOWN'),
('error', 'HOST ALERT: test_host_0;DOWN;HARD;3;Host is DOWN'),
('error', 'HOST EVENT HANDLER: test_host_0;DOWN;HARD;3;eventhandler'),
('error', 'HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;Host is DOWN')])
# Notification not raised - too soon!
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'ACTIVE HOST CHECK: test_host_0;DOWN;3;Host is DOWN')])
# Notification not raised - too soon!
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'ACTIVE HOST CHECK: test_host_0;DOWN;3;Host is DOWN')])
# Host goes UP / HARD
# Get a host check, an alert and a notification
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', 'ACTIVE HOST CHECK: test_host_0;UP;3;Host is UP'),
('info', 'HOST ALERT: test_host_0;UP;HARD;3;Host is UP'),
('info', 'HOST EVENT HANDLER: test_host_0;UP;HARD;3;eventhandler'),
('info', 'HOST NOTIFICATION: test_contact;test_host_0;UP;0;notify-host;Host is UP')
])
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', 'ACTIVE HOST CHECK: test_host_0;UP;1;Host is UP')])
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', 'ACTIVE HOST CHECK: test_host_0;UP;1;Host is UP')])
def test_logs_services(self):
""" Test logs for active / passive checks for hosts
:return: None
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
dispatching=True)
assert self.conf_is_correct
self._scheduler.pushed_conf.log_initial_states = True
self._scheduler.pushed_conf.log_active_checks = True
self._scheduler.pushed_conf.log_passive_checks = True
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = True
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
# Set the notification interval to 5 minutes
svc.notification_interval = 5
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = True
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
# Make sure that the host is UP
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', 'ACTIVE HOST CHECK: test_host_0;UP;0;Host is UP')])
# Service is ok
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;0;Service is OK')])
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Service is OK')])
# Service goes warning / SOFT
self.check(frozen_datetime, svc, 1, 'Service is WARNING',
[('warning',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;WARNING;1;Service is WARNING'),
('warning',
'SERVICE EVENT HANDLER: test_host_0;test_ok_0;WARNING;SOFT;1;eventhandler'),
('warning',
'SERVICE ALERT: test_host_0;test_ok_0;WARNING;SOFT;1;Service is WARNING'),
])
# Service goes warning / HARD
# Get a service check, an alert and a notification
self.check(frozen_datetime, svc, 1, 'Service is WARNING',
[('warning',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;WARNING;1;Service is WARNING'),
('warning',
'SERVICE ALERT: test_host_0;test_ok_0;WARNING;HARD;2;Service is WARNING'),
('warning',
'SERVICE EVENT HANDLER: test_host_0;test_ok_0;WARNING;HARD;2;eventhandler'),
('warning',
'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;'
'WARNING;1;notify-service;Service is WARNING'),
])
# Notification not raised - too soon!
self.check(frozen_datetime, svc, 1, 'Service is WARNING',
[('warning',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;WARNING;2;Service is WARNING')])
# Notification not raised - too soon!
self.check(frozen_datetime, svc, 1, 'Service is WARNING',
[('warning',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;WARNING;2;Service is WARNING')])
# Service goes OK
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;2;Service is OK'),
('info',
'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Service is OK'),
('info',
'SERVICE EVENT HANDLER: test_host_0;test_ok_0;OK;HARD;2;eventhandler'),
('info',
'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;OK;0;'
'notify-service;Service is OK')
])
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Service is OK')])
# Service goes CRITICAL
self.check(frozen_datetime, svc, 2, 'Service is CRITICAL',
[('error',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Service is CRITICAL'),
('error',
'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Service is CRITICAL'),
('error',
'SERVICE EVENT HANDLER: test_host_0;test_ok_0;CRITICAL;SOFT;1;eventhandler'),
])
self.check(frozen_datetime, svc, 2, 'Service is CRITICAL',
[('error',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Service is CRITICAL'),
('error',
'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;Service is CRITICAL'),
('error',
'SERVICE EVENT HANDLER: test_host_0;test_ok_0;CRITICAL;HARD;2;eventhandler'),
('error',
'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;'
'CRITICAL;1;notify-service;Service is CRITICAL')
])
# Service goes OK
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info',
'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;2;Service is OK'),
('info',
'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Service is OK'),
('info',
'SERVICE EVENT HANDLER: test_host_0;test_ok_0;OK;HARD;2;eventhandler'),
('info',
'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;'
'OK;0;notify-service;Service is OK')
])
self.check(frozen_datetime, svc, 0, 'Service OK',
[('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Service OK')])
def test_logs_hosts_disabled(self):
""" Test disabled logs for active / passive checks for hosts
:return: None
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
'cfg/cfg_monitoring_logs_disabled.ini',
dispatching=True)
assert self.conf_is_correct
self._sched = self._scheduler
host = self._scheduler.hosts.find_by_name("test_host_0")
# Set the notification interval (in minutes)
host.notification_interval = 10.0
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = True
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
# Host active checks
self.check(frozen_datetime, host, 0, 'Host is UP', [])
self.check(frozen_datetime, host, 0, 'Host is UP', [])
# Host goes DOWN / SOFT
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'HOST ALERT: test_host_0;DOWN;SOFT;1;Host is DOWN')])
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'HOST ALERT: test_host_0;DOWN;SOFT;2;Host is DOWN')])
# Host goes DOWN / HARD
self.check(frozen_datetime, host, 2, 'Host is DOWN',
[('error', 'HOST ALERT: test_host_0;DOWN;HARD;3;Host is DOWN')])
# Host notification raised
self.check(frozen_datetime, host, 2, 'Host is DOWN', [])
self.check(frozen_datetime, host, 2, 'Host is DOWN', [])
# Host goes UP / HARD
# Get a host check, an alert and a notification
self.check(frozen_datetime, host, 0, 'Host is UP',
[('info', 'HOST ALERT: test_host_0;UP;HARD;3;Host is UP')])
self.check(frozen_datetime, host, 0, 'Host is UP', [])
self.check(frozen_datetime, host, 0, 'Host is UP', [])
def test_logs_services_disabled(self):
""" Test disabled logs for active / passive checks for services
:return: None
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
'cfg/cfg_monitoring_logs_disabled.ini',
dispatching=True)
assert self.conf_is_correct
self._sched = self._scheduler
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
# Set the notification interval (in minutes)
svc.notification_interval = 10.0
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
# Make sure that the host is UP
self.check(frozen_datetime, host, 0, 'Host is UP', [])
# Service is ok
self.check(frozen_datetime, svc, 0, 'Service is OK', [])
self.check(frozen_datetime, svc, 0, 'Service is OK', [])
# Service goes warning / SOFT
self.check(frozen_datetime, svc, 1, 'Service is WARNING',
[('warning',
'SERVICE ALERT: test_host_0;test_ok_0;WARNING;SOFT;1;Service is WARNING')])
# Service goes warning / HARD
# Get a service check, an alert and a notification
self.check(frozen_datetime, svc, 1, 'Service is WARNING',
[('warning', 'SERVICE ALERT: test_host_0;test_ok_0;WARNING;HARD;2;Service is WARNING')])
# Service notification raised
self.check(frozen_datetime, svc, 1, 'Service is WARNING', [])
self.check(frozen_datetime, svc, 1, 'Service is WARNING', [])
# Service goes OK
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Service is OK')])
self.check(frozen_datetime, svc, 0, 'Service is OK', [])
# Service goes CRITICAL
self.check(frozen_datetime, svc, 2, 'Service is CRITICAL',
[('error',
'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Service is CRITICAL')])
self.check(frozen_datetime, svc, 2, 'Service is CRITICAL',
[('error',
'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;Service is CRITICAL')])
# Service goes OK
self.check(frozen_datetime, svc, 0, 'Service is OK',
[('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Service is OK')])
self.check(frozen_datetime, svc, 0, 'Service OK', [])
def test_external_commands(self):
""" Test logs for external commands
:return:
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
dispatching=True)
assert self.conf_is_correct
now = int(time.time())
# Receiver receives unknown host external command
excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;1' % time.time()
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
excmd = '[%d] CHANGE_RETRY_HOST_CHECK_INTERVAL;test_host_0;42' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
expected_logs = [
('info',
'EXTERNAL COMMAND: [%s] CHANGE_RETRY_HOST_CHECK_INTERVAL;test_host_0;42' % now),
('info',
'EXTERNAL COMMAND: [%s] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;1' % now)
]
self.check_monitoring_events_log(expected_logs, dump=True)
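# --- Added illustration (not part of the original test) ---
# External commands use the classic Nagios-style syntax seen above:
#   [timestamp] COMMAND_NAME;arg1;arg2;...
# A minimal, hypothetical helper to build such a line:
import time

def build_external_command(name, *args):
    """Format an external command line with the current timestamp."""
    return '[%d] %s' % (int(time.time()),
                        ';'.join([name] + [str(a) for a in args]))
# build_external_command('CHANGE_SVC_MODATTR', 'test_host_0', 'test_ok_0', 1)
# -> '[1527871800] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;1'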
def test_passive_checks_host(self):
""" Test logs for external commands - passive host checks, log disabled """
self.passive_checks_host(False)
def test_passive_checks_host_2(self):
""" Test logs for external commands - passive host checks, log enabled """
self.passive_checks_host(True)
def passive_checks_host(self, log_passive_checks):
""" Test logs for external commands
:return:
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
dispatching=True)
assert self.conf_is_correct
self.clear_events()
# Force the log passive checks configuration parameter
self._scheduler.pushed_conf.log_passive_checks = log_passive_checks
# -----------------------------
# Host part
# -----------------------------
# Get and configure host
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router on which we depend
host.event_handler_enabled = False
assert host is not None
now = int(time.time())
# Receive passive host check Down
excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
assert 'DOWN' == host.state
assert 'SOFT' == host.state_type
assert 'Host is dead' == host.output
excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
assert 'DOWN' == host.state
assert 'SOFT' == host.state_type
assert 'Host is dead' == host.output
excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop(1)
time.sleep(1.0)
self.external_command_loop(1)
assert 'DOWN' == host.state
assert 'HARD' == host.state_type
assert 'Host is dead' == host.output
# Passive host check log contains:
# - host name,
# - host status,
# - output,
# - long output and
# - performance data
# All are separated with a semi-colon
expected_logs = [
('error',
'HOST ALERT: test_host_0;DOWN;SOFT;1;Host is dead'),
('error',
'HOST ALERT: test_host_0;DOWN;SOFT;2;Host is dead'),
('error',
'HOST ALERT: test_host_0;DOWN;HARD;3;Host is dead'),
('error',
'HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;Host is dead')
]
if log_passive_checks:
expected_logs.extend([
('warning',
'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'),
('warning',
'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'),
('warning',
'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'),
])
else:
expected_logs.extend([
('info',
'EXTERNAL COMMAND: [%s] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % now),
('info',
'EXTERNAL COMMAND: [%s] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % now),
('info',
'EXTERNAL COMMAND: [%s] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % now)
])
self.check_monitoring_events_log(expected_logs)
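# --- Added illustration (not part of the original test) ---
# A minimal sketch of the PASSIVE HOST CHECK line asserted above; the
# observed field order is host;status;output;long_output;perf_data,
# hence the two trailing empty fields when there is neither long output
# nor performance data (formatter name hypothetical):
def passive_host_check_log(host, state_id, output, long_output='', perf_data=''):
    return 'PASSIVE HOST CHECK: %s;%d;%s;%s;%s' % (
        host, state_id, output, long_output, perf_data)
# passive_host_check_log('test_host_0', 2, 'Host is dead')
# -> 'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'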
def test_passive_checks_service_log_disabled(self):
""" Test logs for external commands - passive service checks, log disabled """
self.passive_checks_service(False)
def test_passive_checks_service_log_enabled(self):
""" Test logs for external commands - passive service checks, log enabled """
self.passive_checks_service(True)
def passive_checks_service(self, log_passive_checks):
""" Test logs for external commands
:return:
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
dispatching=True)
assert self.conf_is_correct
self.clear_events()
# Force the log passive checks configuration parameter
self._scheduler.pushed_conf.log_passive_checks = log_passive_checks
now = int(time.time())
# -----------------------------
# Service part
# -----------------------------
# Get host
host = self._scheduler.hosts.find_by_name('test_host_0')
host.checks_in_progress = []
host.event_handler_enabled = False
host.active_checks_enabled = True
host.passive_checks_enabled = True
assert host is not None
# Get service
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.event_handler_enabled = False
svc.active_checks_enabled = True
svc.passive_checks_enabled = True
assert svc is not None
# Passive checks for host and service
# ---------------------------------------------
# Receive passive host check Up
excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP' % time.time()
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
assert 'UP' == host.state
assert 'Host is UP' == host.output
# Service is going ok ...
excmd = u'[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;0;' \
u'Service is OK|rtt=9999;5;10;0;10000' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
assert 'OK' == svc.state
assert 'Service is OK' == svc.output
assert 'rtt=9999;5;10;0;10000' == svc.perf_data
# Service is going ok ... with long output
excmd = u'[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;0;' \
u'Service is OK and have some special characters: àéèüäï' \
u'|rtt=9999;5;10;0;10000' \
u'\r\nLong output... also some specials: àéèüäï' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop(1)
time.sleep(1.0)
self.external_command_loop(1)
assert 'OK' == svc.state
assert u'Service is OK and have some special characters: àéèüäï' == svc.output
assert u'rtt=9999;5;10;0;10000' == svc.perf_data
assert u'Long output... also some specials: àéèüäï' == svc.long_output
# Passive service check log contains:
# - host name,
# - service description,
# - service status,
# - output,
# - long output and
# - performance data
# All are separated with a semi-colon
if log_passive_checks:
expected_logs = [
('info', u'PASSIVE HOST CHECK: test_host_0;0;Host is UP;;'),
('info', u'PASSIVE SERVICE CHECK: test_host_0;test_ok_0;0;'
u'Service is OK;;rtt=9999;5;10;0;10000'),
('info', u'PASSIVE SERVICE CHECK: test_host_0;test_ok_0;0;'
u'Service is OK and have some special characters: àéèüäï;'
u'Long output... also some specials: àéèüäï;'
u'rtt=9999;5;10;0;10000'),
]
else:
# Note that the external command does not log the long output!
expected_logs = [
('info', u'EXTERNAL COMMAND: [%s] PROCESS_HOST_CHECK_RESULT;'
u'test_host_0;0;Host is UP' % now),
('info', u'EXTERNAL COMMAND: [%s] PROCESS_SERVICE_CHECK_RESULT;'
u'test_host_0;test_ok_0;0;Service is OK|rtt=9999;5;10;0;10000' % now),
('info', u'EXTERNAL COMMAND: [%s] PROCESS_SERVICE_CHECK_RESULT;'
u'test_host_0;test_ok_0;0;Service is OK and have some special characters: àéèüäï'
u'|rtt=9999;5;10;0;10000\r\nLong output... also some specials: àéèüäï' % now),
]
self.check_monitoring_events_log(expected_logs)
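# --- Added illustration (not part of the original test) ---
# A minimal sketch of splitting a raw check result like the one submitted
# above into output, performance data and long output, assuming the
# classic 'output|perfdata\r\nlong output' plugin layout:
def split_check_output(raw):
    """Split a raw plugin result into (output, perf_data, long_output)."""
    first_line, _, long_output = raw.partition('\r\n')
    output, _, perf_data = first_line.partition('|')
    return output, perf_data, long_output
# split_check_output('Service is OK|rtt=9999;5;10;0;10000\r\nLong output...')
# -> ('Service is OK', 'rtt=9999;5;10;0;10000', 'Long output...')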
def test_special_external_commands(self):
""" Test logs for special external commands
:return:
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
dispatching=True)
assert self.conf_is_correct
now = int(time.time())
# RESTART_PROGRAM
excmd = '[%d] RESTART_PROGRAM' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
self.assert_any_log_match('RESTART command : tests/libexec/sleep_command.sh 3')
# RELOAD_CONFIG
excmd = '[%d] RELOAD_CONFIG' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
self.assert_any_log_match('RELOAD command : tests/libexec/sleep_command.sh 2')
# UNKNOWN COMMAND
excmd = '[%d] UNKNOWN_COMMAND' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
# Malformed command
excmd = '[%d] MALFORMED COMMAND' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
# The messages are echoed by the launched scripts
expected_logs = [
('info', 'EXTERNAL COMMAND: [%s] RESTART_PROGRAM' % now),
('info', 'RESTART: I start sleeping for 3 seconds...\nI awoke after sleeping 3 seconds | sleep=3'),
('info', 'EXTERNAL COMMAND: [%s] RELOAD_CONFIG' % now),
('info', 'RELOAD: I start sleeping for 2 seconds...\nI awoke after sleeping 2 seconds | sleep=2'),
('error', "Command '[%s] UNKNOWN_COMMAND' is not recognized, sorry" % now),
('error', "Malformed command: '[%s] MALFORMED COMMAND'" % now)
]
self.check_monitoring_events_log(expected_logs)
def test_special_external_commands_no_logs(self):
""" Test no logs for special external commands
:return:
"""
self.setup_with_file('cfg/cfg_monitoring_logs.cfg',
'cfg/cfg_monitoring_logs_disabled.ini',
dispatching=True)
assert self.conf_is_correct
now = int(time.time())
# RESTART_PROGRAM
excmd = '[%d] RESTART_PROGRAM' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
# todo: it should not but it does!
self.assert_any_log_match('RESTART command : tests/libexec/sleep_command.sh 3')
# self.assert_no_log_match('RESTART command : libexec/sleep_command.sh 3')
# RELOAD_CONFIG
excmd = '[%d] RELOAD_CONFIG' % now
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
# todo: it should not but it does!
self.assert_any_log_match('RELOAD command : tests/libexec/sleep_command.sh 2')
# self.assert_no_log_match('RELOAD command : libexec/sleep_command.sh 2')
# No monitoring logs
# todo: it should not but it does!
# assert [] == monitoring_logs
expected_logs = [
# ('info', 'EXTERNAL COMMAND: [%d] RESTART_PROGRAM' % now),
('info', 'RESTART: I start sleeping for 3 seconds...\nI awoke after sleeping 3 seconds | sleep=3'),
# ('info', 'EXTERNAL COMMAND: [%d] RELOAD_CONFIG' % now),
('info', 'RELOAD: I start sleeping for 2 seconds...\nI awoke after sleeping 2 seconds | sleep=2'),
]
self.check_monitoring_events_log(expected_logs)
def test_timeperiod_transition_log(self):
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
tp = self._scheduler.timeperiods.find_by_name('24x7')
self.assertIsNot(tp, None)
data = tp.check_and_log_activation_change().prepare()
assert data['level'] == 'info'
assert data['message'] == 'TIMEPERIOD TRANSITION: 24x7;-1;1'
# Now make this tp unable to be active again by removing all its dateranges
dr = tp.dateranges
tp.dateranges = []
data = tp.check_and_log_activation_change().prepare()
assert data['level'] == 'info'
assert data['message'] == 'TIMEPERIOD TRANSITION: 24x7;1;0'
# Ok, let's get back to work
tp.dateranges = dr
data = tp.check_and_log_activation_change().prepare()
assert data['level'] == 'info'
assert data['message'] == 'TIMEPERIOD TRANSITION: 24x7;0;1'
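# --- Added illustration (not part of the original test) ---
# The TIMEPERIOD TRANSITION message checked above encodes
# '<name>;<from_state>;<to_state>', where -1 stands for an unknown
# initial state, 0 for inactive and 1 for active (formatter hypothetical):
def timeperiod_transition(name, from_state, to_state):
    return 'TIMEPERIOD TRANSITION: %s;%d;%d' % (name, from_state, to_state)
assert timeperiod_transition('24x7', -1, 1) == 'TIMEPERIOD TRANSITION: 24x7;-1;1'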
|
class TestMonitoringLogs(AlignakTest):
'''
This class tests the check_result brok
'''
def setUp(self):
pass
def check(self, frozen_datetime, item, state_id, output, expected_logs):
'''
:param item: concerned item
:param state_id: state identifier
:param output: state text
:param expected_logs: expected monitoring logs
:return:
'''
pass
def test_logs_hosts(self):
''' Test logs for active / passive checks for hosts
:return: None
'''
pass
def test_logs_services(self):
''' Test logs for active / passive checks for services
:return: None
'''
pass
def test_logs_hosts_disabled(self):
''' Test disabled logs for active / passive checks for hosts
:return: None
'''
pass
def test_logs_services_disabled(self):
''' Test disabled logs for active / passive checks for services
:return: None
'''
pass
def test_external_commands(self):
''' Test logs for external commands
:return:
'''
pass
def test_passive_checks_host(self):
''' Test logs for external commands - passive host checks, log disabled '''
pass
def test_passive_checks_host_2(self):
''' Test logs for external commands - passive host checks, log enabled '''
pass
def passive_checks_host(self, log_passive_checks):
''' Test logs for external commands
:return:
'''
pass
def test_passive_checks_service_log_disabled(self):
''' Test logs for external commands - passive service checks, log disabled '''
pass
def test_passive_checks_service_log_enabled(self):
''' Test logs for external commands - passive service checks, log enabled '''
pass
def passive_checks_service(self, log_passive_checks):
''' Test logs for external commands
:return:
'''
pass
def test_special_external_commands(self):
''' Test logs for special external commands
:return:
'''
pass
def test_special_external_commands_no_logs(self):
''' Test no logs for special external commands
:return:
'''
pass
def test_timeperiod_transition_log(self):
pass
| 17 | 15 | 42 | 6 | 27 | 9 | 1 | 0.32 | 1 | 4 | 0 | 0 | 16 | 1 | 16 | 71 | 688 | 115 | 439 | 53 | 422 | 141 | 263 | 49 | 246 | 2 | 2 | 1 | 18 |
4,132 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_notification_ways.py
|
tests.test_notification_ways.TestNotificationWay
|
class TestNotificationWay(AlignakTest):
def setUp(self):
super(TestNotificationWay, self).setUp()
self.setup_with_file('cfg/cfg_notification_ways.cfg', dispatching=True)
assert self.conf_is_correct
def test_create_nw(self):
""" Test notification ways creation and check"""
host_sms = self._scheduler.commands.find_by_name('notify-host-sms')
service_sms = self._scheduler.notificationways.find_by_name('notify-service-sms')
# Create a notification way with parameters
parameters = {
'notificationway_name': 'email_in_day',
'register': True,
'host_notification_commands': 'notify-host-sms',
'host_notification_options': 'durfs',
'host_notification_period': '24x7',
'min_business_impact': 0,
'service_notification_commands': 'notify-service-sms',
'service_notification_options': 'wucrf',
'service_notification_period': '24x7',
'use': ''
}
nw = NotificationWay(parameters)
# And it will add an uuid
parameters['uuid'] = nw.uuid
# These properties are missing from the provided parameters but they will exist in the object
parameters.update({
# Transformed properties
'host_notification_commands': ['notify-host-sms'],
'host_notification_options': ['durfs'],
'service_notification_commands': ['notify-service-sms'],
'service_notification_options': ['wucrf'],
'use': [],
# Some more properties
# 'definition_order': 100,
'imported_from': 'alignak-self',
# 'name': '',
'configuration_errors': [],
'configuration_warnings': [],
'customs': {},
'plus': {},
'tags': [],
'downtimes': {},
'conf_is_correct': True
})
# creation_time and log_actions will not be modified! They are set
# only if they do not yet exist
assert nw.__dict__ == parameters
def test_correct_nw(self):
""" Test check notification way is correct"""
# self.show_logs()
# Get a NW
email_in_day = self._scheduler.notificationways.find_by_name('email_in_day')
assert email_in_day.is_correct()
pprint(email_in_day.__dict__)
# Default is correct
test = copy.deepcopy(email_in_day)
assert test.host_notification_options == [u'd', u'u', u'r', u'f', u's']
assert test.service_notification_options == [u'w', u'u', u'c', u'r', u'f']
assert test.host_notifications_enabled is True
assert test.service_notifications_enabled is True
assert test.is_correct()
# If no notifications are enabled, it will be correct anyway...
test = copy.deepcopy(email_in_day)
test.host_notification_options = ['n']
test.service_notification_options = ['n']
assert test.host_notifications_enabled is False
assert test.service_notifications_enabled is False
assert test.is_correct()
test = copy.deepcopy(email_in_day)
assert test.host_notifications_enabled is True
assert test.service_notifications_enabled is True
assert test.is_correct()
test = copy.deepcopy(email_in_day)
# No defined commands
test.__dict__.pop('host_notification_commands')
test.__dict__.pop('service_notification_commands')
assert test.is_correct()
assert test.configuration_warnings == [
'[notificationway::email_in_day] do not have any service_notification_commands defined',
'[notificationway::email_in_day] do not have any host_notification_commands defined'
]
test = copy.deepcopy(email_in_day)
# No defined commands
test.host_notification_commands = None
test.service_notification_commands = None
assert test.is_correct()
assert test.configuration_warnings == [
'[notificationway::email_in_day] do not have any service_notification_commands defined',
'[notificationway::email_in_day] do not have any host_notification_commands defined'
]
assert test.get_notification_commands('host') == []
assert test.get_notification_commands('service') == []
test = copy.deepcopy(email_in_day)
test.host_notification_period = None
test.host_notification_commands = [None]
test.service_notification_period = None
test.service_notification_commands = [None]
test.configuration_errors = []
assert not test.is_correct()
pprint(test.__dict__)
assert '[notificationway::email_in_day] a service_notification_command is missing' \
in test.configuration_errors
assert '[notificationway::email_in_day] a host_notification_command is missing' \
in test.configuration_errors
assert '[notificationway::email_in_day] the service_notification_period is invalid' \
in test.configuration_errors
assert '[notificationway::email_in_day] the host_notification_period is invalid' \
in test.configuration_errors
def test_contact_nw(self):
""" Test notification ways for a contact"""
now = time.time()
# Get the contact
contact = self._scheduler.contacts.find_by_name("test_contact")
print("All notification Way:")
for nw in self._scheduler.notificationways:
print("\t%s" % nw.notificationway_name)
assert nw.is_correct()
# 3 defined NWs and 3 self-created NWs (because 3 contacts exist with some
# special properties: period, commands, ...)
assert len(self._scheduler.notificationways) == 6
email_in_day = self._scheduler.notificationways.find_by_name('email_in_day')
assert email_in_day.uuid in contact.notificationways
sms_the_night = self._scheduler.notificationways.find_by_name('sms_the_night')
assert sms_the_night.uuid in contact.notificationways
# And check the criticity values
assert 0 == email_in_day.min_business_impact
assert 5 == sms_the_night.min_business_impact
print("Contact '%s' notification way(s):" % contact.get_name())
for nw_id in contact.notificationways:
nw = self._scheduler.notificationways[nw_id]
print("\t %s (or %s)" % (nw.notificationway_name, nw.get_name()))
# Get host notifications commands
for c in nw.host_notification_commands:
print("\t\t%s" % c.get_name())
for c in nw.get_notification_commands('host'):
print("\t\t%s" % c.get_name())
# Get service notifications commands
for c in nw.service_notification_commands:
print("\t\t%s" % c.get_name())
for c in nw.get_notification_commands('service'):
print("\t\t%s" % c.get_name())
# 2 NWs for 'test_contact'
assert len(contact.notificationways) == 2
print("Contact '%s' commands:" % (contact.get_name()))
# Get host notifications commands
for c in contact.host_notification_commands:
print("\t\tcontact host property: %s" % c.get_name())
for c in contact.get_notification_commands(self._scheduler.notificationways, 'host'):
print("\t\tcontact host get_notification_commands:", c.get_name())
# 2 commands for host notification (one from the NW and one contact defined)
assert len(contact.host_notification_commands) == 2
# Get service notifications commands
for c in contact.service_notification_commands:
print("\t\tcontact service property: %s" % c.get_name())
for c in contact.get_notification_commands(self._scheduler.notificationways, 'service'):
print("\t\tcontact service get_notification_commands:", c.get_name())
# 2 commands for service notification (one from the NW and one contact defined)
assert len(contact.service_notification_commands) == 2
contact_simple = self._scheduler.contacts.find_by_name("test_contact_simple")
# It's the created notification way for this simple contact
test_contact_simple_inner_notificationway = \
self._scheduler.notificationways.find_by_name("test_contact_simple_inner_nw")
print("Simple contact")
for nw_id in contact_simple.notificationways:
nw = self._scheduler.notificationways[nw_id]
print("\t%s" % nw.notificationway_name)
for c in nw.service_notification_commands:
print("\t\t%s" % c.get_name())
assert test_contact_simple_inner_notificationway.uuid in contact_simple.notificationways
# we use a huge criticity value from now on
huge_criticity = 5
# Now all want* functions
# First is ok with warning alerts
assert email_in_day.want_service_notification(self._scheduler.timeperiods,
now, 'WARNING', 'PROBLEM',
huge_criticity) is True
# But an SMS is no way for a warning. When we sleep, we only wake up for critical alerts!
assert sms_the_night.want_service_notification(self._scheduler.timeperiods,
now, 'WARNING', 'PROBLEM',
huge_criticity) is False
# Same with contacts now
# First is ok for warning in the email_in_day nw
assert contact.want_service_notification(self._scheduler.notificationways,
self._scheduler.timeperiods,
now, 'WARNING', 'PROBLEM', huge_criticity) is True
# Simple is not ok for it
assert contact_simple.want_service_notification(self._scheduler.notificationways,
self._scheduler.timeperiods,
now, 'WARNING', 'PROBLEM',
huge_criticity) is False
# Then for host notification
# First is ok for warning in the email_in_day nw
assert contact.want_host_notification(self._scheduler.notificationways,
self._scheduler.timeperiods,
now, 'FLAPPING', 'PROBLEM', huge_criticity) is True
# Simple is not ok for it
assert contact_simple.want_host_notification(self._scheduler.notificationways,
self._scheduler.timeperiods,
now, 'FLAPPING', 'PROBLEM',
huge_criticity) is False
# And now we check that we refuse SMS for a low level criticity
# I do not want to be awaken by a dev server! When I sleep, I sleep!
# (and my wife will kill me if I do...)
# We take the EMAIL test because SMS is for the night only, so we
# take a very low criticity value here
assert email_in_day.want_service_notification(self._scheduler.timeperiods,
now, 'WARNING', 'PROBLEM', -1) is False
# Test the inheritance for notification ways
host_template = self._scheduler.hosts.find_by_name("test_host_contact_template")
contact_template_1 = self._scheduler.contacts[host_template.contacts[0]]
commands_contact_template_1 = contact_template_1.get_notification_commands(
self._scheduler.notificationways, 'host')
contact_template_2 = self._scheduler.contacts[host_template.contacts[1]]
commands_contact_template_2 = contact_template_2.get_notification_commands(
self._scheduler.notificationways, 'host')
resp = sorted([sorted([command.get_name() for command in commands_contact_template_1]),
sorted([command.get_name() for command in commands_contact_template_2])])
assert sorted([['notify-host', 'notify-host-work'],
['notify-host-sms', 'notify-host-work']]) == resp
contact_template_1 = self._scheduler.contacts[host_template.contacts[0]]
commands_contact_template_1 = contact_template_1.get_notification_commands(
self._scheduler.notificationways, 'service')
contact_template_2 = self._scheduler.contacts[host_template.contacts[1]]
commands_contact_template_2 = contact_template_2.get_notification_commands(
self._scheduler.notificationways, 'service')
resp = sorted([sorted([command.get_name() for command in commands_contact_template_1]),
sorted([command.get_name() for command in commands_contact_template_2])])
assert sorted([['notify-service', 'notify-service-work'],
['notify-service-sms', 'notify-service-work']]) == resp
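# --- Added illustration (not part of the original test) ---
# A minimal sketch of the business-impact filter exercised above: a
# notification way only wants to notify when the item criticity reaches
# its min_business_impact (helper name hypothetical):
def impact_allows_notification(min_business_impact, item_criticity):
    return item_criticity >= min_business_impact
# email_in_day (min 0) accepts criticity 5 but refuses -1;
# sms_the_night (min 5) refuses anything below 5.
assert impact_allows_notification(0, 5)
assert not impact_allows_notification(0, -1)
assert not impact_allows_notification(5, 4)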
|
class TestNotificationWay(AlignakTest):
def setUp(self):
pass
def test_create_nw(self):
''' Test notification ways creation and check'''
pass
def test_correct_nw(self):
''' Test check notification way is correct'''
pass
def test_contact_nw(self):
''' Test notification ways for a contact'''
pass
| 5 | 3 | 65 | 8 | 46 | 12 | 4 | 0.25 | 1 | 2 | 1 | 0 | 4 | 0 | 4 | 59 | 265 | 35 | 184 | 27 | 179 | 46 | 125 | 27 | 120 | 13 | 2 | 2 | 16 |
4,133 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_notifications.py
|
tests.test_notifications.TestNotifications
|
class TestNotifications(AlignakTest):
"""
This class tests notifications
"""
def setUp(self):
super(TestNotifications, self).setUp()
def test_0_nonotif(self):
""" Test with notifications disabled in service definition
:return: None
"""
self.setup_with_file('cfg/cfg_nonotif.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
assert "HARD" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical HARD, no notifications'
self.assert_actions_count(1)
self.assert_actions_match(0, 'VOID', 'command')
self.scheduler_loop(1, [[svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number, 'Ok HARD, no notifications'
self.assert_actions_count(0)
def test_1_nonotif_enable_with_extcmd(self):
""" Test notification disabled in service definition but enabled later
with an external command
:return: None
"""
self.setup_with_file('cfg/cfg_nonotif.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
# notification_interval is in minutes; configure to have one notification per minute
svc.notification_interval = 1
# No notifications enabled by configuration!
assert not svc.notifications_enabled
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert 0 == host.current_notification_number, 'Raised a notification!'
assert 0 == svc.current_notification_number, 'Raised a notification!'
# Time warp
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, \
'Critical SOFT, should not have notification!'
self.assert_actions_count(0)
# Time warp
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
assert "HARD" == svc.state_type
assert 0 == svc.current_notification_number, \
'Critical HARD, but have a notification whereas it is disabled!'
# No raised notification !
self.show_actions()
# Raised only a master notification!
self.assert_actions_count(1)
# External command to enable the notifications for the service
now = int(time.time())
cmd = "[{0}] ENABLE_SVC_NOTIFICATIONS;{1};{2}\n".format(now, svc.host_name,
svc.service_description)
self._scheduler.run_external_commands([cmd])
self.external_command_loop()
assert svc.notifications_enabled
assert "HARD" == svc.state_type
assert "CRITICAL" == svc.state
# Time warp
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
# Notification !
self.show_actions()
assert 1 == svc.current_notification_number, \
'Critical HARD, must have 1 notification'
self.assert_actions_count(2)
self.assert_actions_match(0, 'serviceoutput CRITICAL', 'command')
self.assert_actions_match(1, 'VOID', 'command')
# Time warp
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
# The service recovers
self.scheduler_loop(1, [[svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert 0 == svc.current_notification_number, 'Ok HARD, no notifications'
self.assert_actions_count(2)
self.assert_actions_match(0, 'serviceoutput CRITICAL', 'command')
self.assert_actions_match(1, 'serviceoutput OK', 'command')
def test_1_notifications_service_with_no_contacts(self):
""" Test notifications are sent to host contacts for a service with no defined contacts
:return: None
"""
self.setup_with_file('cfg/cfg_nonotif.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_contact")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
print("Host: %s" % host)
print("Host contacts groups: %s" % host.contact_groups)
print("Host contacts: %s" % host.contacts)
assert host.contacts != []
assert host.notifications_enabled
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_contact",
"test_no_contacts")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
print("Service: %s" % svc)
print("Service contacts: %s" % svc.contacts)
# The service has inherited the host contacts !
assert svc.contacts == host.contacts
assert svc.notifications_enabled
# notification_interval is in minutes; configure to have one notification per minute
svc.notification_interval = 1
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert "CRITICAL" == svc.state
assert 1 == svc.current_notification_number, 'Critical HARD, must have 1 notification'
self.show_actions()
self.assert_actions_count(2)
self.assert_actions_match(1, 'VOID', 'command')
self.assert_actions_match(1, 'PROBLEM', 'type')
self.assert_actions_match(0, 'PROBLEM', 'type')
self.assert_actions_match(0, '/notifier.pl --hostname test_host_contact --servicedesc test_no_contacts '
'--notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL ', 'command')
self.assert_actions_match(0, '--serviceattempt 2 --servicestatetype HARD', 'command')
self.assert_actions_match(0, 'NOTIFICATIONTYPE=PROBLEM, '
'NOTIFICATIONRECIPIENTS=test_contact, '
'NOTIFICATIONISESCALATED=False, '
'NOTIFICATIONAUTHOR=n/a, '
'NOTIFICATIONAUTHORNAME=n/a, '
'NOTIFICATIONAUTHORALIAS=n/a, '
'NOTIFICATIONCOMMENT=n/a, '
'HOSTNOTIFICATIONNUMBER=1, '
'SERVICENOTIFICATIONNUMBER=1, ', 'command')
# Time warp 1 minute
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
self.show_actions()
self.assert_actions_count(3)
self.assert_actions_match(2, 'VOID', 'command')
self.assert_actions_match(2, 'PROBLEM', 'type')
self.assert_actions_match(1, 'PROBLEM', 'type')
self.assert_actions_match(1, '/notifier.pl --hostname test_host_contact --servicedesc test_no_contacts '
'--notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL ', 'command')
self.assert_actions_match(1, '--serviceattempt 2 --servicestatetype HARD', 'command')
self.assert_actions_match(1, 'HOSTNOTIFICATIONNUMBER=2, SERVICENOTIFICATIONNUMBER=2, ', 'command')
self.assert_actions_match(0, 'PROBLEM', 'type')
self.assert_actions_match(0, '/notifier.pl --hostname test_host_contact --servicedesc test_no_contacts '
'--notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL ', 'command')
self.assert_actions_match(0, '--serviceattempt 2 --servicestatetype HARD', 'command')
self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1, ', 'command')
self.scheduler_loop(1, [[svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert "OK" == svc.state
assert 0 == svc.current_notification_number, 'Ok HARD, no notifications'
# 1st notification for service critical
self.show_actions()
self.assert_actions_match(0, 'PROBLEM', 'type')
self.assert_actions_match(0, 'notifier.pl --hostname test_host_contact --servicedesc test_no_contacts '
'--notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL', 'command')
self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command')
# 2nd notification for service critical
self.assert_actions_match(1, 'PROBLEM', 'type')
self.assert_actions_match(1, 'notifier.pl --hostname test_host_contact --servicedesc test_no_contacts '
'--notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL', 'command')
self.assert_actions_match(1, 'HOSTNOTIFICATIONNUMBER=2, SERVICENOTIFICATIONNUMBER=2', 'command')
# Notification for the service recovery
self.assert_actions_match(2, 'RECOVERY', 'type')
self.assert_actions_match(2, 'notifier.pl --hostname test_host_contact --servicedesc test_no_contacts '
'--notificationtype RECOVERY --servicestate OK --serviceoutput OK', 'command')
self.assert_actions_match(2, 'HOSTNOTIFICATIONNUMBER=0, SERVICENOTIFICATIONNUMBER=0', 'command')
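# --- Added illustration (not part of the original test) ---
# The time-warp pattern used throughout these tests, assuming freezegun:
# freeze the clock, run a scheduler loop, then tick just past the
# notification_interval (expressed in minutes) so that a re-notification
# becomes due (helper name hypothetical):
import datetime

def tick_past_notification_interval(frozen_datetime, interval_minutes=1):
    """Advance the frozen clock just beyond the notification interval."""
    frozen_datetime.tick(delta=datetime.timedelta(minutes=interval_minutes,
                                                  seconds=1))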
def test_2_notifications(self):
""" Test notifications sent in normal mode
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# notification_interval is in minutes; configure to have one notification per minute
svc.notification_interval = 1
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert svc.current_notification_number == 0, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "SOFT" == svc.state_type
assert svc.current_notification_number == 0, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
# create master notification + create first notification
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
# 2 actions
# * 1 - VOID = notification master
# * 2 - notifier.pl to test_contact
self.show_actions()
self.assert_actions_count(2)
assert svc.current_notification_number == 1, 'Critical HARD, must have 1 notification'
# no changes, because we do not yet need to create a second notification
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
self.assert_actions_count(2)
# Time warp 1 minute 1 second
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
# notification #2
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
self.assert_actions_count(3)
assert svc.current_notification_number == 2
# Time warp 1 minute 1 second
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
# notification #3
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
self.assert_actions_count(4)
assert svc.current_notification_number == 3
# Time warp 10 seconds
frozen_datetime.tick(delta=datetime.timedelta(seconds=10))
# Too soon for a new one
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
self.assert_actions_count(4)
assert svc.current_notification_number == 3
# Simulate that the first notification is sent ...
self.show_actions()
actions = sorted(list(self._scheduler.actions.values()), key=lambda x: x.creation_time)
action = copy.copy(actions[1])
action.exit_status = 0
action.status = 'launched'
# and return to the scheduler
self._scheduler.waiting_results.put(action)
# re-loop scheduler to manage this
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# One less notification ... because it was sent!
self.assert_actions_count(3)
# But still the same notification number
assert svc.current_notification_number == 3
# Disable the contact notification
# -----
cmd = "[%lu] DISABLE_CONTACT_SVC_NOTIFICATIONS;test_contact" % time.time()
self._scheduler.run_external_commands([cmd])
# Time warp 1 minute 1 second
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
# Not one more notification ...
self.assert_actions_count(3)
assert svc.current_notification_number == 3
# Time warp 1 minute 1 second
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
# Not one more notification ...
self.assert_actions_count(3)
assert svc.current_notification_number == 3
# Enable the contact notification
# -----
cmd = "[%lu] ENABLE_CONTACT_SVC_NOTIFICATIONS;test_contact" % time.time()
self._scheduler.run_external_commands([cmd])
# Time warp 1 minute 1 second
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
# 2 loop turns this time ...
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
self.assert_actions_count(4)
assert svc.current_notification_number == 4
self.show_actions()
# 1st notification for service critical => sent !
# self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL', 'command')
# self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command')
# 2nd notification for service critical
self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL', 'command')
self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=2, SERVICENOTIFICATIONNUMBER=2', 'command')
# 3rd notification for service critical
self.assert_actions_match(1, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL', 'command')
self.assert_actions_match(1, 'HOSTNOTIFICATIONNUMBER=3, SERVICENOTIFICATIONNUMBER=3', 'command')
# 4th notification for service critical
self.assert_actions_match(2, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL', 'command')
self.assert_actions_match(2, 'HOSTNOTIFICATIONNUMBER=4, SERVICENOTIFICATIONNUMBER=4', 'command')
self.scheduler_loop(1, [[svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
# The service recovered, the current notification number is reset !
assert svc.current_notification_number == 0
# Actions count did not change because:
# 1/ a new recovery notification is created
# 2/ the master problem notification is removed
self.assert_actions_count(4)
self.show_actions()
# 1st recovery notification for service recovery
self.assert_actions_match(3, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype RECOVERY --servicestate OK --serviceoutput OK', 'command')
self.assert_actions_match(3, 'NOTIFICATIONTYPE=RECOVERY', 'command')
self.assert_actions_match(3, 'HOSTNOTIFICATIONNUMBER=0, SERVICENOTIFICATIONNUMBER=0', 'command')
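# The external commands used in these tests follow the Nagios-like wire
# format "[timestamp] COMMAND;arg1;arg2;...". A minimal sketch of a builder
# (build_ext_cmd is a hypothetical helper for illustration, not an Alignak API):
#
#     def build_ext_cmd(command, *args):
#         # e.g. "[1527877800] DISABLE_CONTACT_SVC_NOTIFICATIONS;test_contact"
#         return "[%lu] %s" % (time.time(), ";".join((command,) + args))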
def test_3_notifications(self):
""" Test notifications of service states OK -> WARNING -> CRITICAL -> OK
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# notification_interval is in minutes; configure it to get one notification per minute
svc.notification_interval = 1
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, 'Warning SOFT, no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "WARNING" == svc.state
assert "HARD" == svc.state_type
assert 1 == svc.current_notification_number, 'Warning HARD, must have 1 notification'
self.assert_actions_count(2)
self.show_actions()
self.assert_actions_match(0, 'serviceoutput WARNING', 'command')
self.assert_actions_match(1, 'VOID', 'command')
print(("Last hard state: %s" % svc.last_hard_state))
assert "WARNING" == svc.last_hard_state
# Time warp 5 minutes
frozen_datetime.tick(delta=datetime.timedelta(minutes=5))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert "CRITICAL" == svc.last_hard_state
assert 2 == svc.current_notification_number, 'Critical HARD, must have 2 notifications'
self.assert_actions_count(3)
self.show_actions()
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate WARNING',
'command')
self.assert_actions_match(1,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(2,
'VOID',
'command')
self.scheduler_loop(1, [[svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert 0 == svc.current_notification_number
self.show_actions()
self.assert_actions_count(3)
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate WARNING',
'command')
self.assert_actions_match(1,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(2,
'--notificationtype RECOVERY --servicestate OK',
'command')
def test_4_notifications(self):
""" Test notifications of service states OK -> CRITICAL -> WARNING -> OK
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
# To make tests quicker we could make notifications be sent quickly (0.1 minute = 6 seconds)... note this value is overridden just below
svc.notification_interval = 0.1
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# notification_interval is in minutes; configure it to get one notification per minute
svc.notification_interval = 1
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert 1 == svc.current_notification_number, 'Critical HARD, must have 1 ' \
'notification'
self.assert_actions_count(2)
self.show_actions()
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(1,
'VOID',
'command')
# Time warp 5 minutes
frozen_datetime.tick(delta=datetime.timedelta(minutes=5))
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert 2 == svc.current_notification_number, 'Warning HARD, must have 2 ' \
'notifications'
self.show_actions()
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(1,
'--notificationtype PROBLEM --servicestate WARNING',
'command')
self.assert_actions_match(2,
'VOID',
'command')
def test_notifications_passive_host(self):
""" Test notifications for passively check hosts
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
# Check freshness on each scheduler tick
self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1})
# Get host
host = self._scheduler.hosts.find_by_name('test_host_0')
host.act_depend_of = [] # ignore the router
host.checks_in_progress = []
host.event_handler_enabled = False
host.active_checks_enabled = False
host.passive_checks_enabled = True
host.check_freshness = True
host.max_check_attempts = 1
host.freshness_threshold = 1800
host.freshness_state = 'd'
print(("Host: %s - state: %s/%s, freshness: %s / %s, attempts: %s" % (
host, host.state_type, host.state, host.check_freshness, host.freshness_threshold,
host.max_check_attempts)))
print(("Host: %s - state: %s/%s, last state update: %s" % (
host, host.state_type, host.state, host.last_state_update)))
assert host is not None
# Get service
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.event_handler_enabled = False
svc.active_checks_enabled = False
svc.passive_checks_enabled = True
svc.check_freshness = True
svc.freshness_threshold = 120
assert svc is not None
print(("Service: %s - state: %s/%s, freshness: %s / %s" % (svc, svc.state_type, svc.state,
svc.check_freshness,
svc.freshness_threshold)))
# Freeze the time !
initial_datetime = datetime.datetime(year=2017, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.external_command_loop()
time.sleep(0.1)
# Freshness ok !
assert not host.freshness_expired
assert "UP" == host.state
assert "HARD" == host.state_type
assert host.attempt == 0
assert host.max_check_attempts == 1
assert host.current_notification_number == 0, 'All OK no notifications'
self.assert_actions_count(0)
print(("Host: %s - state: %s/%s, last state update: %s" % (
host, host.state_type, host.state, host.last_state_update)))
# Time warp 1 hour
frozen_datetime.tick(delta=datetime.timedelta(hours=1))
self.manage_freshness_check(1)
self.show_logs()
# Time warp 10 seconds
frozen_datetime.tick(delta=datetime.timedelta(seconds=10))
# Check freshness on each scheduler tick
self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1})
self.manage_freshness_check(1)
self.show_logs()
time.sleep(0.1)
# Freshness expired !
assert host.freshness_expired
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert host.attempt == 1
assert host.max_check_attempts == 1
assert host.is_max_attempts()
assert host.current_notification_number == 1, 'Raised a notification'
self.assert_actions_count(2)
print(("Host: %s - state: %s/%s, last state update: %s" % (
host, host.state_type, host.state, host.last_state_update)))
# Time warp 1 hour
frozen_datetime.tick(delta=datetime.timedelta(hours=1))
self.external_command_loop()
time.sleep(0.1)
assert host.freshness_expired
assert "DOWN" == host.state
assert "HARD" == host.state_type
# Perhaps the attempt should have been incremented?
assert host.attempt == 1
assert host.max_check_attempts == 1
assert host.is_max_attempts()
# Notification for the host and the service
assert host.current_notification_number == 2, 'We should have 2 notifications'
self.show_actions()
self.show_logs()
# 3 actions
# * 1 - VOID = notification master
# * 2 - notifier.pl to test_contact
# * 3 - notifier.pl to test_contact
self.assert_actions_count(3)
def test_notifications_with_delay(self):
""" Test notifications with use property first_notification_delay
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
svc.first_notification_delay = 0.1 # 0.1 minute = a 6 seconds first notification delay
svc.notification_interval = 0.1 / 6 # and then re-send every second (0.1/6 minute)
svc.checks_in_progress = []
svc.act_depend_of = [] # no host_checks on critical check_results
svc.event_handler_enabled = False
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
self.assert_actions_count(0)
time.sleep(0.1)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
assert "HARD" == svc.state_type
self.assert_actions_count(1)
time.sleep(7)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
self.show_actions()
self.assert_actions_count(2)
self.assert_actions_match(0, 'serviceoutput WARNING', 'command')
self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command')
self.assert_actions_match(1, 'VOID', 'command')
assert svc.last_time_critical == 0
assert svc.last_time_unknown == 0
assert svc.last_time_warning > 0
assert svc.last_time_ok > 0
time.sleep(2)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
self.show_actions()
self.assert_actions_count(3)
self.assert_actions_match(0, 'serviceoutput WARNING', 'command')
self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command')
# One more notification!
self.assert_actions_match(1, 'serviceoutput WARNING', 'command')
self.assert_actions_match(1, 'HOSTNOTIFICATIONNUMBER=2, SERVICENOTIFICATIONNUMBER=2', 'command')
self.assert_actions_match(2, 'VOID', 'command')
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
assert 3 == svc.current_notification_number
self.assert_actions_count(4)
assert svc.last_time_unknown == 0
assert svc.last_time_warning > 0
assert svc.last_time_critical > 0
assert svc.last_time_ok > 0
time.sleep(7)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
assert 4 == svc.current_notification_number
self.show_actions()
self.assert_actions_count(5)
self.assert_actions_match(0, 'serviceoutput WARNING', 'command')
self.assert_actions_match(0, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command')
self.assert_actions_match(1, 'serviceoutput WARNING', 'command')
self.assert_actions_match(1, 'HOSTNOTIFICATIONNUMBER=2, SERVICENOTIFICATIONNUMBER=2', 'command')
# One more notification!
self.assert_actions_match(2, 'serviceoutput CRITICAL', 'command')
self.assert_actions_match(2, 'HOSTNOTIFICATIONNUMBER=3, SERVICENOTIFICATIONNUMBER=3', 'command')
self.assert_actions_match(3, 'serviceoutput CRITICAL', 'command')
self.assert_actions_match(3, 'HOSTNOTIFICATIONNUMBER=4, SERVICENOTIFICATIONNUMBER=4', 'command')
self.assert_actions_match(4, 'VOID', 'command')
assert 5 == len(svc.notifications_in_progress)
self.scheduler_loop(1, [[svc, 0, 'OK']])
time.sleep(7)
self.scheduler_loop(1, [[svc, 0, 'OK']])
assert 0 == svc.current_notification_number
self.assert_actions_count(5)
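# Worked arithmetic for the minute-based timing properties used above
# (a sketch for clarity, not executed by the test):
#
#     svc.first_notification_delay = 0.1   # 0.1 minute * 60 = 6 seconds
#     svc.notification_interval = 0.1 / 6  # ~0.0167 minute * 60 = 1 second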
def test_notifications_outside_period(self):
""" Test when we are not in notification_period, so do not send notifications
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
# To make tests quicker we make notifications be sent very quickly (1 second)
svc.notification_interval = 0.1 / 6
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
timeperiod = self._scheduler.timeperiods.find_by_name('none')
svc.notification_period = timeperiod.uuid
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
assert "HARD" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical HARD, no notifications'
# Only a master notification but no real one!
self.assert_actions_count(1)
self.assert_actions_match(0, 'VOID', 'command')
self.assert_actions_match(0, 'PROBLEM', 'type')
self.scheduler_loop(1, [[svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number
self.show_actions()
# Only a master notification but no real one!
self.assert_actions_count(1)
self.assert_actions_match(0, 'VOID', 'command')
self.assert_actions_match(0, 'RECOVERY', 'type')
def test_notifications_ack(self):
""" Test notifications not sent when an acknowledge is set
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
# To make tests quicker we make notifications be sent very quickly (1 second)
svc.notification_interval = 0.1 / 6
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "SOFT" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert 1 == svc.current_notification_number, \
'Critical HARD, must have 1 notification'
self.show_actions()
self.assert_actions_count(2)
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(1,
'VOID',
'command')
# Time warp 5 minutes
frozen_datetime.tick(delta=datetime.timedelta(minutes=5))
now = int(time.time())
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n".\
format(now, svc.host_name, svc.service_description, 1, 1, 1, 'darth vader',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1)
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert 1 == svc.current_notification_number, \
'Critical HARD and ack, but must have 1 notification'
self.show_actions()
self.assert_actions_count(3)
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(1,
'--notificationtype ACKNOWLEDGEMENT',
'command')
self.assert_actions_match(2,
'VOID',
'command')
# Time warp 5 minutes
frozen_datetime.tick(delta=datetime.timedelta(minutes=5))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
self.scheduler_loop(1)
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert 1 == svc.current_notification_number, \
'Critical HARD, must have 1 notification'
self.assert_actions_count(3)
# Time warp 5 minutes
frozen_datetime.tick(delta=datetime.timedelta(minutes=5))
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
assert 3 == svc.current_notification_number, \
'Warning HARD, must have 3 notifications'
self.show_actions()
# TODO: 2 warning notifications raised! Looks strange, but seems correct...
self.assert_actions_count(5)
self.assert_actions_match(0,
'--notificationtype PROBLEM --servicestate CRITICAL',
'command')
self.assert_actions_match(1,
'--notificationtype ACKNOWLEDGEMENT',
'command')
self.assert_actions_match(2,
'--notificationtype PROBLEM --servicestate WARNING',
'command')
self.assert_actions_match(3,
'--notificationtype PROBLEM --servicestate WARNING',
'command')
self.assert_actions_match(4,
'VOID',
'command')
def test_notifications_downtime(self):
""" Test notifications not sent when a downtime is scheduled
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
# To make tests quicker we make notifications be sent very quickly (1 second)
svc.notification_interval = 0.1 / 6
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number, 'All OK no notifications'
self.assert_actions_count(0)
now = int(time.time())
cmd = "[{0}] SCHEDULE_SVC_DOWNTIME;{1};{2};{3};{4};{5};{6};{7};{8};{9}\n".\
format(now, svc.host_name, svc.service_description, now, (now + 1000), 1, 0, 0,
'darth vader', 'add downtime for maintenance')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
assert "SOFT" == svc.state_type
assert "CRITICAL" == svc.state
assert 0 == svc.current_notification_number, 'Critical SOFT, no notifications'
self.assert_actions_count(1)
self.assert_actions_match(0, 'serviceoutput OK', 'command')
self.assert_actions_match(0, 'notificationtype DOWNTIMESTART', 'command')
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
assert "HARD" == svc.state_type
assert 0 == svc.current_notification_number, 'Critical HARD, no notifications'
self.assert_actions_count(2)
self.assert_actions_match(1, 'VOID', 'command')
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
self.assert_actions_count(2)
self.scheduler_loop(1, [[svc, 0, 'OK']])
time.sleep(0.1)
assert 0 == svc.current_notification_number
self.assert_actions_count(1)
self.assert_actions_match(0, 'serviceoutput OK', 'command')
self.assert_actions_match(0, 'notificationtype DOWNTIMESTART', 'command')
def test_notifications_no_renotify(self):
""" Test notifications sent only once if configured for this
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
# To prevent notifications from being re-sent, set this to 0
svc.notification_interval = 0
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(1)
assert svc.current_notification_number == 0, 'All OK no notifications'
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(1)
assert "SOFT" == svc.state_type
assert svc.current_notification_number == 0, 'Critical SOFT, no notifications'
self.assert_actions_count(0)
# Time warp
frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1))
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
assert "HARD" == svc.state_type
self.assert_actions_count(1)
assert svc.current_notification_number == 1, 'Critical HARD, must have 1 ' \
'notification'
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
# No re-notification!
self.assert_actions_count(1)
assert svc.current_notification_number == 1
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# The notifications are created to be launched in the next second when they happen !
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
# No re-notification!
self.assert_actions_count(1)
assert svc.current_notification_number == 1
self.show_actions()
# 1st notification for service critical
self.assert_actions_match(
0, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 '
'--notificationtype PROBLEM --servicestate CRITICAL --serviceoutput CRITICAL',
'command')
self.assert_actions_match(
0, 'NOTIFICATIONTYPE=PROBLEM, NOTIFICATIONRECIPIENTS=test_contact, '
'NOTIFICATIONISESCALATED=False, NOTIFICATIONAUTHOR=n/a, '
'NOTIFICATIONAUTHORNAME=n/a, NOTIFICATIONAUTHORALIAS=n/a, '
'NOTIFICATIONCOMMENT=n/a, HOSTNOTIFICATIONNUMBER=1, '
'SERVICENOTIFICATIONNUMBER=1',
'command')
|
class TestNotifications(AlignakTest):
'''
This class tests notifications
'''
def setUp(self):
pass
def test_0_nonotif(self):
''' Test with notifications disabled in service definition
:return: None
'''
pass
def test_1_nonotif_enable_with_extcmd(self):
''' Test notification disabled in service definition but enabled later
with an external command
:return: None
'''
pass
def test_1_notifications_service_with_no_contacts(self):
''' Test notifications are sent to host contacts for a service with no defined contacts
:return: None
'''
pass
def test_2_notifications(self):
''' Test notifications sent in normal mode
:return: None
'''
pass
def test_3_notifications(self):
''' Test notifications of service states OK -> WARNING -> CRITICAL -> OK
:return: None
'''
pass
def test_4_notifications(self):
''' Test notifications of service states OK -> CRITICAL -> WARNING -> OK
:return: None
'''
pass
def test_notifications_passive_host(self):
''' Test notifications for passively checked hosts
:return: None
'''
pass
def test_notifications_with_delay(self):
''' Test notifications using the first_notification_delay property
:return: None
'''
pass
def test_notifications_outside_period(self):
''' Test that notifications are not sent outside of the notification_period
:return: None
'''
pass
def test_notifications_ack(self):
''' Test notifications not sent when an acknowledgement is set
:return: None
'''
pass
def test_notifications_downtime(self):
''' Test notifications not sent when a downtime is scheduled
:return: None
'''
pass
def test_notifications_no_renotify(self):
''' Test that notifications are sent only once when configured to do so
:return: None
'''
pass
| 14 | 13 | 90 | 12 | 62 | 17 | 1 | 0.28 | 1 | 5 | 0 | 0 | 13 | 0 | 13 | 68 | 1,184 | 169 | 811 | 64 | 797 | 229 | 695 | 56 | 681 | 1 | 2 | 1 | 13 |
4,134 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/monitor.py
|
alignak.monitor.MonitorConnection
|
class MonitorConnection(object):
"""Base class for Alignak Web Services client connection"""
def __init__(self, endpoint='http://127.0.0.1:7773/ws'):
if endpoint.endswith('/'): # pragma: no cover - test url is complying ...
self.url_endpoint_root = endpoint[0:-1]
else:
self.url_endpoint_root = endpoint
self.session = requests.Session()
# Default headers live on Session.headers ('session.header' is not a requests attribute)
self.session.headers.update({'Content-Type': 'application/json'})
# Requests HTTP adapters
http_adapter = HTTPAdapter(max_retries=3)
https_adapter = HTTPAdapter(max_retries=3)
self.session.mount('http://', http_adapter)
self.session.mount('https://', https_adapter)
self.authenticated = False
self._token = None
self.timeout = None
logger.info("Alignak monitor, endpoint: %s", self.url_endpoint_root)
def __repr__(self): # pragma: no cover
return '<WS report to %r, authenticated: %r />' \
% (self.url_endpoint_root, self.authenticated)
__str__ = __repr__
def get_url(self, endpoint):
"""
Returns the formatted full URL endpoint
:param endpoint: str. the relative endpoint to access
:return: str
"""
return "%s/%s" % (self.url_endpoint_root, endpoint)
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
# pylint: disable=too-many-arguments
"""
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
"""
logger.debug("Parameters for get_response:")
logger.debug("\t - endpoint: %s", endpoint)
logger.debug("\t - method: %s", method)
logger.debug("\t - headers: %s", headers)
logger.debug("\t - json: %s", json)
logger.debug("\t - params: %s", params)
logger.debug("\t - data: %s", data)
url = self.get_url(endpoint)
# First stage. Errors are connection errors (timeout, no session, ...)
try:
response = self.session.request(method=method, url=url, headers=headers, json=json,
params=params, data=data, timeout=self.timeout)
logger.debug("response headers: %s", response.headers)
logger.debug("response content: %s", response.content)
except RequestException as exp:
response = {"_status": "ERR",
"_error": {"message": exp},
"_issues": {"message": exp}}
return response
@staticmethod
def decode(response):
"""
Decodes and returns the response as JSON (dict); on HTTP error, an error dictionary is returned instead of raising
:param response: requests.response object
:return: dict
"""
# Second stage. Errors are backend errors (bad login, bad url, ...)
try:
response.raise_for_status()
except requests.HTTPError as exp:
response = {"_status": "ERR",
"_error": {"message": exp, "code": response.status_code},
"_issues": {"message": exp, "code": response.status_code}}
return response
else:
return response.json()
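# A minimal caller-side sketch of the two-stage error handling: stage one
# (get_response) turns connection errors into a dict, stage two (decode)
# turns HTTP errors into a dict; the 'version' endpoint is an assumption:
#
#     raw = ws.get_response(method='GET', endpoint='version')
#     if isinstance(raw, dict):  # stage one failed (connection error)
#         print(raw['_error']['message'])
#     else:
#         result = MonitorConnection.decode(response=raw)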
def set_token(self, token):
"""
Set the token used to authenticate subsequent requests
:param token: str. token to set in auth. If None, reinit auth
"""
if token:
auth = HTTPBasicAuth(token, '')
self._token = token
self.authenticated = True
self.session.auth = auth
logger.debug("Using session token: %s", token)
else:
self._token = None
self.authenticated = False
self.session.auth = None
logger.debug("Session token/auth reinitialised")
def get_token(self):
"""Get the stored backend token"""
return self._token
token = property(get_token, set_token)
def login(self, username, password):
"""
Log into the WS interface and get the authentication token
if login is:
- accepted, returns True
- refused, returns False
In case of any error, raises a BackendException
:param username: login name
:type username: str
:param password: password
:type password: str
:return: True if authentication is successful, otherwise False
:rtype: bool
"""
logger.debug("login for: %s", username)
# Configured as not authenticated WS
if not username and not password:
self.set_token(token=None)
return False
if not username or not password:
logger.error("Username or password cannot be None!")
self.set_token(token=None)
return False
endpoint = 'login'
json = {'username': username, 'password': password}
response = self.get_response(method='POST', endpoint=endpoint, json=json)
if response.status_code == 401:
logger.error("Access denied to %s", self.url_endpoint_root)
self.set_token(token=None)
return False
resp = self.decode(response=response)
if 'token' in resp:
self.set_token(token=resp['token'])
return True
return False # pragma: no cover - unreachable ...
def logout(self):
"""
Logout from the backend
:return: True if logout is successful, otherwise False
:rtype: bool
"""
logger.debug("request backend logout")
if not self.authenticated:
logger.warning("Unnecessary logout ...")
return True
endpoint = 'logout'
_ = self.get_response(method='POST', endpoint=endpoint)
self.session.close()
self.set_token(token=None)
return True
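# A hedged usage sketch of the session lifecycle (endpoint and credentials
# are illustrative assumptions, not defaults of this module):
#
#     ws = MonitorConnection(endpoint='http://127.0.0.1:7773/ws')
#     if ws.login('admin', 'admin'):
#         ...  # call ws.get() / ws.post() / ws.patch() here
#         ws.logout()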
def get(self, endpoint, params=None):
"""
Get items or item in alignak backend
If an error occurs, a BackendException is raised.
This method builds a response as a dictionary that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: parameters for the backend API
:type params: dict
:return: dictionary as specified above
:rtype: dict
"""
response = self.get_response(method='GET', endpoint=endpoint, params=params)
resp = self.decode(response=response)
if '_status' not in resp: # pragma: no cover - need specific backend tests
resp['_status'] = u'OK' # TODO: Sure??
return resp
def post(self, endpoint, data, files=None, headers=None):
# pylint: disable=unused-argument
"""
Create a new item
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to create
:type data: dict
:param files: Not used. To be implemented
:type files: None
:param headers: headers (example: Content-Type)
:type headers: dict
:return: response (creation information)
:rtype: dict
"""
# We let Requests encode data to json
response = self.get_response(method='POST', endpoint=endpoint, json=data, headers=headers)
resp = self.decode(response=response)
return resp
def patch(self, endpoint, data):
"""
Method to update an item
The headers must include an If-Match containing the object _etag.
headers = {'If-Match': contact_etag}
The data dictionary contains the fields that must be modified.
If the patching fails because the _etag object does not match the provided one, a
BackendException is raised with code = 412.
If inception is True, this method makes a new GET request on the endpoint to refresh the
_etag and then a new patch is called.
If an HTTP 412 error occurs, a BackendException is raised. This exception is:
- code: 412
- message: response content
- response: backend response
All other HTTP errors raise a BackendException.
If some _issues are provided by the backend, this exception is:
- code: HTTP error code
- message: response content
- response: JSON encoded backend response (including '_issues' dictionary ...)
If no _issues are provided and an _error is signaled by the backend, this exception is:
- code: backend error code
- message: backend error message
- response: JSON encoded backend response
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to update
:type data: dict
:return: dictionary containing patch response from the backend
:rtype: dict
"""
response = self.get_response(method='PATCH', endpoint=endpoint, json=data,
headers={'Content-Type': 'application/json'})
if response.status_code == 200:
return self.decode(response=response)
return response
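# A minimal patch sketch under the assumptions above (the endpoint and the
# payload are illustrative, not a confirmed backend route):
#
#     resp = ws.patch('host/srv001', data={'alias': 'Server 001'})
#     if isinstance(resp, dict) and resp.get('_status') == 'OK':
#         print("update accepted")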
|
class MonitorConnection(object):
'''Base class for Alignak Web Services client connection'''
def __init__(self, endpoint='http://127.0.0.1:7773/ws'):
pass
def __repr__(self):
pass
def get_url(self, endpoint):
'''
Returns the formatted full URL endpoint
:param endpoint: str. the relative endpoint to access
:return: str
'''
pass
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
'''
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
'''
pass
@staticmethod
def decode(response):
'''
Decodes and returns the response as JSON (dict); on HTTP error, an error dictionary is returned instead of raising
:param response: requests.response object
:return: dict
'''
pass
def set_token(self, token):
'''
Set the token used to authenticate subsequent requests
:param token: str. token to set in auth. If None, reinit auth
'''
pass
def get_token(self):
'''Get the stored backend token'''
pass
def login(self, username, password):
'''
Log into the WS interface and get the authentication token
if login is:
- accepted, returns True
- refused, returns False
In case of any error, raises a BackendException
:param username: login name
:type username: str
:param password: password
:type password: str
:return: True if authentication is successful, otherwise False
:rtype: bool
'''
pass
def logout(self):
'''
Logout from the backend
:return: True if logout is successful, otherwise False
:rtype: bool
'''
pass
def get(self, endpoint, params=None):
'''
Get items or item in alignak backend
If an error occurs, a BackendException is raised.
This method builds a response as a dictionary that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: parameters for the backend API
:type params: dict
:return: dictionary as specified above
:rtype: dict
'''
pass
def post(self, endpoint, data, files=None, headers=None):
'''
Create a new item
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to create
:type data: dict
:param files: Not used. To be implemented
:type files: None
:param headers: headers (example: Content-Type)
:type headers: dict
:return: response (creation information)
:rtype: dict
'''
pass
def patch(self, endpoint, data):
'''
Method to update an item
The headers must include an If-Match containing the object _etag.
headers = {'If-Match': contact_etag}
The data dictionary contains the fields that must be modified.
If the patching fails because the _etag object does not match the provided one, a
BackendException is raised with code = 412.
If inception is True, this method makes a new GET request on the endpoint to refresh the
_etag and then a new patch is called.
If an HTTP 412 error occurs, a BackendException is raised. This exception is:
- code: 412
- message: response content
- response: backend response
All other HTTP errors raise a BackendException.
If some _issues are provided by the backend, this exception is:
- code: HTTP error code
- message: response content
- response: JSON encoded backend response (including '_issues' dictionary ...)
If no _issues are provided and an _error is signaled by the backend, this exception is:
- code: backend error code
- message: backend error message
- response: JSON encoded backend response
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to update
:type data: dict
:return: dictionary containing patch response from the backend
:rtype: dict
'''
pass
| 14 | 11 | 22 | 3 | 9 | 10 | 2 | 1.08 | 1 | 5 | 0 | 0 | 11 | 5 | 12 | 12 | 286 | 54 | 114 | 39 | 100 | 123 | 104 | 36 | 91 | 5 | 1 | 1 | 23 |
4,135 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_metrics.py
|
tests.test_metrics.TestMetricsRun
|
class TestMetricsRun(AlignakTest):
"""
This class tests the inner metrics module running
"""
def setUp(self):
super(TestMetricsRun, self).setUp()
# Log at DEBUG level
self.set_unit_tests_logger_level()
# Create our own stats manager...
# do not use the global object to restart with a fresh one on each test
self.fake_carbon = FakeCarbonServer(host='localhost', port=2004)
def tearDown(self):
super(TestMetricsRun, self).tearDown()
self.fake_carbon.stop()
self.fake_carbon.join()
def test_inner_module_checks_results(self):
""" Test that inner metrics module is pushing data to Graphite
:return: None
"""
self.setup_with_file('cfg/cfg_metrics.cfg',
dispatching=True)
# Module is an internal one (no external process) in the broker daemon modules manager
my_module = self._broker_daemon.modules_manager.instances[0]
assert my_module.is_external is False
my_module.metrics_flush_count = 1
# When the broker daemon receives a Brok, it is propagated to the module
# Host check result
self.clear_logs()
hcr = {
"host_name": "srv001",
"last_time_unreachable": 0,
"last_problem_id": 0,
"passive_check": False,
"retry_interval": 1,
"last_event_id": 0,
"problem_has_been_acknowledged": False,
"command_name": "pm-check_linux_host_alive",
"last_state": "UP",
"latency": 0.2317881584,
"last_state_type": "HARD",
"last_hard_state_change": 1444427108,
"last_time_up": 0,
"percent_state_change": 0.0,
"state": "DOWN",
"last_chk": 1444427104,
"last_state_id": 0,
"end_time": 0,
"timeout": 0,
"current_event_id": 10,
"execution_time": 3.1496069431000002,
"start_time": 0,
"return_code": 2,
"state_type": "SOFT",
"output": "CRITICAL - Plugin timed out after 10 seconds",
"in_checking": True,
"early_timeout": 0,
"in_scheduled_downtime": False,
"attempt": 0,
"state_type_id": 1,
"acknowledgement_type": 1,
"last_state_change": 1444427108.040841,
"last_time_down": 1444427108,
"instance_id": 0,
"long_output": "",
"current_problem_id": 0,
"check_interval": 5,
"state_id": 2,
"has_been_checked": 1,
"perf_data": "uptime=1200;rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
}
b = Brok({'data': hcr, 'type': 'host_check_result'})
self._broker_daemon.manage_brok(b)
self.show_logs()
self.assert_log_count(0)
# Service check result
self.clear_logs()
scr = {
"host_name": "srv001",
"service_description": "ping",
"command_name": "ping",
"attempt": 1,
"execution_time": 3.1496069431000002,
"latency": 0.2317881584,
"return_code": 2,
"state": "OK",
"state_type": "HARD",
"state_id": 0,
"state_type_id": 1,
"output": "PING OK - Packet loss = 0%, RTA = 0.05 ms",
"long_output": "Long output ...",
"perf_data": "rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0",
"passive_check": False,
"problem_has_been_acknowledged": False,
"acknowledgement_type": 1,
"in_scheduled_downtime": False,
"last_chk": 1473597375,
"last_state_change": 1444427108.147903,
"last_state_id": 0,
"last_state": "UNKNOWN",
"last_state_type": "HARD",
"last_hard_state_change": 0.0,
"last_time_unknown": 0,
"last_time_unreachable": 0,
"last_time_critical": 1473597376,
"last_time_warning": 0,
"last_time_ok": 0,
"retry_interval": 2,
"percent_state_change": 4.1,
"check_interval": 5,
"in_checking": False,
"early_timeout": 0,
"instance_id": "3ac88dd0c1c04b37a5d181622e93b5bc",
"current_event_id": 1,
"last_event_id": 0,
"current_problem_id": 1,
"last_problem_id": 0,
"timeout": 0,
"has_been_checked": 1,
"start_time": 0,
"end_time": 0
}
b = Brok({'data': scr, 'type': 'service_check_result'})
self._broker_daemon.manage_brok(b)
self.show_logs()
self.assert_log_count(0)
print(my_module.my_metrics)
|
class TestMetricsRun(AlignakTest):
'''
This class tests the inner metrics module running
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_inner_module_checks_results(self):
''' Test that inner metrics module is pushing data to Graphite
:return: None
'''
pass
| 4 | 2 | 46 | 5 | 37 | 3 | 1 | 0.12 | 1 | 3 | 2 | 0 | 3 | 1 | 3 | 58 | 145 | 19 | 113 | 9 | 109 | 13 | 27 | 9 | 23 | 1 | 2 | 0 | 3 |
4,136 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/log.py
|
alignak.log.UTCFormatter
|
class UTCFormatter(logging.Formatter):
"""This logging formatter converts the log date/time to UTC"""
converter = time.gmtime
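# A minimal usage sketch (standard logging wiring; the format string is an
# illustrative assumption): setting converter = time.gmtime makes %(asctime)s
# render in UTC.
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(UTCFormatter('[%(asctime)s] %(levelname)s: %(message)s'))
#     logging.getLogger('alignak').addHandler(handler)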
|
class UTCFormatter(logging.Formatter):
'''This logging formatter converts the log date/time to UTC'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 3 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,137 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/macroresolver.py
|
alignak.macroresolver.MacroResolver
|
class MacroResolver(Borg):
"""MacroResolver class is used to resolve macros (in command call). See above for details"""
my_type = 'macroresolver'
# Global macros
macros = {
'TOTALHOSTS':
'_get_total_hosts',
'TOTALHOSTSUP':
'_get_total_hosts_up',
'TOTALHOSTSDOWN':
'_get_total_hosts_down',
'TOTALHOSTSUNREACHABLE':
'_get_total_hosts_unreachable',
'TOTALHOSTSDOWNUNHANDLED':
'_get_total_hosts_down_unhandled',
'TOTALHOSTSUNREACHABLEUNHANDLED':
'_get_total_hosts_unreachable_unhandled',
'TOTALHOSTPROBLEMS':
'_get_total_hosts_problems',
'TOTALHOSTPROBLEMSUNHANDLED':
'_get_total_hosts_problems_unhandled',
'TOTALSERVICES':
'_get_total_services',
'TOTALSERVICESOK':
'_get_total_services_ok',
'TOTALSERVICESWARNING':
'_get_total_services_warning',
'TOTALSERVICESCRITICAL':
'_get_total_services_critical',
'TOTALSERVICESUNKNOWN':
'_get_total_services_unknown',
'TOTALSERVICESUNREACHABLE':
'_get_total_services_unreachable',
'TOTALSERVICESWARNINGUNHANDLED':
'_get_total_services_warning_unhandled',
'TOTALSERVICESCRITICALUNHANDLED':
'_get_total_services_critical_unhandled',
'TOTALSERVICESUNKNOWNUNHANDLED':
'_get_total_services_unknown_unhandled',
'TOTALSERVICEPROBLEMS':
'_get_total_services_problems',
'TOTALSERVICEPROBLEMSUNHANDLED':
'_get_total_services_problems_unhandled',
'LONGDATETIME':
'_get_long_date_time',
'SHORTDATETIME':
'_get_short_date_time',
'DATE':
'_get_date',
'TIME':
'_get_time',
'TIMET':
'_get_timet',
'PROCESSSTARTTIME':
'_get_process_start_time',
'EVENTSTARTTIME':
'_get_events_start_time',
}
output_macros = [
'HOSTOUTPUT',
'HOSTPERFDATA',
'HOSTACKAUTHOR',
'HOSTACKCOMMENT',
'SERVICEOUTPUT',
'SERVICEPERFDATA',
'SERVICEACKAUTHOR',
'SERVICEACKCOMMENT'
]
def init(self, conf):
"""Initialize MacroResolver instance with conf.
Must be called at least once.
:param conf: configuration to load
:type conf: alignak.objects.Config
:return: None
"""
# For searching classes and elements for on-demand macros
# we need links to the types
self.my_conf = conf
self.lists_on_demand = []
self.hosts = self.my_conf.hosts
# For special void host_name handling...
self.host_class = self.hosts.inner_class
self.lists_on_demand.append(self.hosts)
self.services = self.my_conf.services
self.contacts = self.my_conf.contacts
self.lists_on_demand.append(self.contacts)
self.hostgroups = self.my_conf.hostgroups
self.lists_on_demand.append(self.hostgroups)
self.commands = self.my_conf.commands
self.servicegroups = self.my_conf.servicegroups
self.lists_on_demand.append(self.servicegroups)
self.contactgroups = self.my_conf.contactgroups
self.lists_on_demand.append(self.contactgroups)
self.illegal_macro_output_chars = self.my_conf.illegal_macro_output_chars
self.env_prefix = self.my_conf.env_variables_prefix
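# Because MacroResolver inherits from Borg, all instances share the same
# state, so one init() call configures every resolver (a sketch; conf is
# an assumed loaded configuration object):
#
#     MacroResolver().init(conf)
#     assert MacroResolver().hosts is conf.hosts  # shared Borg state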
@staticmethod
def _get_macros(chain):
"""Get all macros of a chain
Cut '$' char and create a dict with the following structure::
{ 'MacroSTR1' : {'val': '', 'type': 'unknown'}
'MacroSTR2' : {'val': '', 'type': 'unknown'}
}
:param chain: chain to parse
:type chain: str
:return: dict with macro parsed as key
:rtype: dict
"""
regex = re.compile(r'(\$)')
elts = regex.split(chain)
macros = {}
in_macro = False
for elt in elts:
if elt == '$':
in_macro = not in_macro
elif in_macro:
macros[elt] = {'val': '', 'type': 'unknown'}
return macros
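# For example (a sketch of the expected parse result, not a doctest):
#
#     MacroResolver._get_macros("$HOSTNAME$ and $SERVICEDESC$")
#     # -> {'HOSTNAME': {'val': '', 'type': 'unknown'},
#     #     'SERVICEDESC': {'val': '', 'type': 'unknown'}}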
def _get_value_from_element(self, elt, prop):
# pylint: disable=too-many-return-statements
"""Get value from an element's property.
The property may be a function to call.
If the property is not resolved (because not implemented), this function will return 'n/a'
:param elt: element
:type elt: object
:param prop: element property
:type prop: str
:return: getattr(elt, prop) or getattr(elt, prop)() (call)
:rtype: str
"""
args = None
# We have args to provide to the function
if isinstance(prop, tuple):
prop, args = prop
value = getattr(elt, prop, None)
if value is None:
return 'n/a'
try:
# If the macro is set to a list property
if isinstance(value, list):
# Return the list items, comma separated and bracketed
return "[%s]" % ','.join(value)
# If the macro is not set as a function to call
if not isinstance(value, collections.Callable):
return value
# Case of a function call with no arguments
if not args:
return value()
# Case where we need args to the function
# ex : HOSTGROUPNAME (we need hostgroups)
# ex : SHORTSTATUS (we need hosts and services if bp_rule)
real_args = []
for arg in args:
real_args.append(getattr(self, arg, None))
return value(*real_args)
except AttributeError:
# Commented because there are many unresolved macros and this log is spamming :/
# # Raise a warning and return a strange value when macro cannot be resolved
# warnings.warn(
# 'Error when getting the property value for a macro: %s',
# MacroWarning, stacklevel=2)
# Return a strange value when macro cannot be resolved
return 'n/a'
except UnicodeError:
if isinstance(value, string_types):
return str(value, 'utf8', errors='ignore')
return 'n/a'
def _delete_unwanted_caracters(self, chain):
"""Remove not wanted char from chain
unwanted char are illegal_macro_output_chars attribute
:param chain: chain to remove char from
:type chain: str
:return: chain cleaned
:rtype: str
"""
try:
chain = chain.decode('utf8', 'replace')
except UnicodeEncodeError:
# If it is still encoded correctly, ignore...
pass
except AttributeError:
# Python 3 will raise an exception because the line is still unicode
pass
for char in self.illegal_macro_output_chars:
chain = chain.replace(char, '')
return chain
def get_env_macros(self, data):
"""Get all environment macros from data
For each object in data ::
* Fetch all macros in object.__class__.macros
* Fetch all customs macros in o.custom
:param data: data to get macro
:type data:
:return: dict with macro name as key and macro value as value
:rtype: dict
"""
env = {}
for obj in data:
cls = obj.__class__
macros = cls.macros
for macro in macros:
if macro.startswith("USER"):
continue
prop = macros[macro]
value = self._get_value_from_element(obj, prop)
env['%s%s' % (self.env_prefix, macro)] = value
if hasattr(obj, 'customs'):
# make NAGIOS__HOSTMACADDR from _MACADDR
for cmacro in obj.customs:
new_env_name = '%s_%s%s' % (self.env_prefix,
obj.__class__.__name__.upper(),
cmacro[1:].upper())
env[new_env_name] = obj.customs[cmacro]
return env
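# Naming sketch: with an assumed env_variables_prefix of 'ALIGNAK', a host
# macro HOSTNAME and a custom variable _MACADDR would yield (values are
# illustrative):
#
#     {'ALIGNAKHOSTNAME': 'srv001', 'ALIGNAK_HOSTMACADDR': '00:11:22:33:44:55'}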
def resolve_simple_macros_in_string(self, c_line, data, macromodulations, timeperiods,
args=None):
# pylint: disable=too-many-locals, too-many-branches, too-many-nested-blocks
"""Replace macro in the command line with the real value
:param c_line: command line to modify
:type c_line: str
:param data: objects list, use to look for a specific macro
:type data:
:param macromodulations: the available macro modulations
:type macromodulations: dict
:param timeperiods: the available timeperiods
:type timeperiods: dict
:param args: args given to the command line, used to get "ARGN" macros.
:type args:
:return: command line with '$MACRO$' replaced with values
:rtype: str
"""
# Now we prepare the classes for looking at the class.macros
data.append(self) # For getting global MACROS
if hasattr(self, 'my_conf'):
data.append(self.my_conf) # For USERN macros
# we should do some loops for nested macros
# like $USER1$ hiding like a ninja in a $ARG2$ Macro. And if
# $USER1$ is pointing to $USER34$ etc etc, we should loop
# until we reach the bottom. So the last loop is when we no
# longer have any macros :)
still_got_macros = True
nb_loop = 0
while still_got_macros:
nb_loop += 1
# Ok, we want the macros in the command line
macros = self._get_macros(c_line)
if not macros:
break
# Put in the macros the type of macro for all macros
self._get_type_of_macro(macros, data)
# We can get out if we do not have macros this loop
still_got_macros = bool(macros)
# Now we get values from elements
for macro in macros:
# If type ARGN, look at ARGN cutting
if macros[macro]['type'] == 'ARGN' and args is not None:
macros[macro]['val'] = self._resolve_argn(macro, args)
macros[macro]['type'] = 'resolved'
# If object type, get value from a property
if macros[macro]['type'] == 'object':
obj = macros[macro]['object']
if obj not in data:
continue
prop = obj.macros[macro]
if not prop:
continue
macros[macro]['val'] = self._get_value_from_element(obj, prop)
# Now check if we do not have a 'output' macro. If so, we must
# delete all special characters that can be dangerous
if macro in self.output_macros:
logger.debug("-> macro from: %s, %s = %s", obj, macro, macros[macro])
macros[macro]['val'] = self._delete_unwanted_caracters(macros[macro]['val'])
# If custom type, get value from an object custom variables
if macros[macro]['type'] == 'CUSTOM':
cls_type = macros[macro]['class']
# Beware: only cut the first _HOST, _SERVICE or _CONTACT occurrence,
# so the macro name may still contain it...
macro_name = re.split('_' + cls_type, macro, 1)[1].upper()
logger.debug(" ->: %s - %s", cls_type, macro_name)
# Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS
# Now we get the element in data that have the type HOST
# and we check if it got the custom value
for elt in data:
if not elt or elt.__class__.my_type.upper() != cls_type:
continue
logger.debug(" : for %s: %s", elt, elt.customs)
if not getattr(elt, 'customs', None):
continue
if '_' + macro_name in elt.customs:
macros[macro]['val'] = elt.customs['_' + macro_name]
logger.debug(" : macro %s = %s", macro, macros[macro]['val'])
# Then look on the macromodulations, in reverse order, so
# the last defined will be the first applied
mms = getattr(elt, 'macromodulations', [])
for macromodulation_id in mms[::-1]:
macromodulation = macromodulations[macromodulation_id]
if not macromodulation.is_active(timeperiods):
continue
# Look if the modulation got the value,
# but also if it's currently active
if "_%s" % macro_name in macromodulation.customs:
macros[macro]['val'] = macromodulation.customs["_%s" % macro_name]
# If on-demand type, get the value from the dynamically provided data objects
if macros[macro]['type'] == 'ONDEMAND':
macros[macro]['val'] = self._resolve_ondemand(macro, data)
# We resolved all we can, now replace the macros in the command call
for macro in macros:
c_line = c_line.replace("$%s$" % macro, "%s" % (macros[macro]['val']))
# A $$ means we want a $, it's not a macro!
# We replace $$ by a big dirty thing to be sure to not misinterpret it
c_line = c_line.replace("$$", "DOUBLEDOLLAR")
if nb_loop > 32: # too much loop, we exit
still_got_macros = False
# We now replace the big dirty token we made by only a simple $
c_line = c_line.replace("DOUBLEDOLLAR", "$")
return c_line.strip()
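# Illustrative resolution sketch (hypothetical command and values, not from
# the source): assuming a host whose HOSTADDRESS macro resolves to
# '127.0.0.1' and args=['80']:
#   resolve_simple_macros_in_string('check_tcp -H $HOSTADDRESS$ -p $ARG1$ $$',
#                                   data, macromodulations, timeperiods, args)
#   -> 'check_tcp -H 127.0.0.1 -p 80 $'
# Nested macros (a macro whose value contains another $MACRO$) are handled by
# the surrounding while loop, which is bounded to 32 iterations.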
def resolve_command(self, com, data, macromodulations, timeperiods):
"""Resolve command macros with data
:param com: check / event handler or command call object
:type com: object
:param data: objects list, used to search for a specific macro (custom or object related)
:type data:
:param macromodulations: the available macro modulations
:type macromodulations: dict
:param timeperiods: the available timeperiods
:type timeperiods: dict
:return: command line with '$MACRO$' replaced with values
:rtype: str
"""
logger.debug("Resolving: macros in: %s, arguments: %s", com.command.command_line, com.args)
return self.resolve_simple_macros_in_string(com.command.command_line, data,
macromodulations, timeperiods,
args=com.args)
@staticmethod
def _get_type_of_macro(macros, objs):
r"""Set macros types
Example::
ARG\d -> ARGN,
HOSTBLABLA -> class one and set Host in class)
_HOSTTOTO -> HOST CUSTOM MACRO TOTO
SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1
:param macros: macros list in a dictionary
:type macros: dict
:param objs: objects list, used to tag object macros
:type objs: list
:return: None
"""
for macro in macros:
# ARGN Macros
if re.match(r'ARG\d', macro):
macros[macro]['type'] = 'ARGN'
continue
# USERN macros
# are managed in the Config class, so no
# need to look that here
elif re.match(r'_HOST\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'HOST'
continue
elif re.match(r'_SERVICE\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'SERVICE'
# value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]
continue
elif re.match(r'_CONTACT\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'CONTACT'
continue
# On demand macro
elif len(macro.split(':')) > 1:
macros[macro]['type'] = 'ONDEMAND'
continue
# OK, classical macro...
for obj in objs:
if macro in obj.macros:
macros[macro]['type'] = 'object'
macros[macro]['object'] = obj
continue
@staticmethod
# pylint: disable=inconsistent-return-statements
def _resolve_argn(macro, args):
"""Get argument from macro name
ie : $ARG3$ -> args[2]
:param macro: macro to parse
:type macro:
:param args: args given to command line
:type args:
:return: argument at position N-1 in args table (where N is the int parsed)
:rtype: None | str
"""
# first, get the number of args
_id = None
matches = re.search(r'ARG(?P<id>\d+)', macro)
if matches is not None:
_id = int(matches.group('id')) - 1
try:
return args[_id]
except IndexError:
# Required argument not found, returns an empty string
return ''
return ''
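# Illustrative sketch (hypothetical values): with args=['-w', '5', '-c', '10'],
# _resolve_argn('ARG3', args) returns '-c' (ARGN is 1-based, args is 0-based)
# and _resolve_argn('ARG9', args) returns '' because there is no 9th argument.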
def _resolve_ondemand(self, macro, data):
# pylint: disable=too-many-locals
"""Get on demand macro value
If the macro cannot be resolved, this function will return 'n/a' rather than
an empty string, so as to alert the caller of a potential problem.
:param macro: macro to parse
:type macro:
:param data: data to get value from
:type data:
:return: macro value
:rtype: str
"""
elts = macro.split(':')
nb_parts = len(elts)
macro_name = elts[0]
# 3 parts for a service, 2 for all other types...
if nb_parts == 3:
val = ''
(host_name, service_description) = (elts[1], elts[2])
# host_name can be void, so it's the host in data
# that is important. We use our self.host_class to
# find the host in the data :)
if host_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
host_name = elt.host_name
# Ok now we get service
serv = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if serv is not None:
cls = serv.__class__
prop = cls.macros[macro_name]
val = self._get_value_from_element(serv, prop)
return val
# Ok, service was easy, now hard part
else:
val = ''
elt_name = elts[1]
# Special case: elt_name can be void
# so it's the host it applies to
if elt_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
elt_name = elt.host_name
for od_list in self.lists_on_demand:
cls = od_list.inner_class
# We search our type by looking at the macro
if macro_name in cls.macros:
prop = cls.macros[macro_name]
i = od_list.find_by_name(elt_name)
if i is not None:
val = self._get_value_from_element(i, prop)
# Ok we got our value :)
break
return val
# Return a strange value in this case rather than an empty string
return 'n/a'
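# Illustrative sketch (hypothetical names): '$HOSTSTATE:srv-1$' is a 2-parts
# on-demand macro resolved against the lists_on_demand objects, while
# '$SERVICESTATEID:srv-1:Load$' is a 3-parts one resolved through
# self.services.find_srv_by_name_and_hostname('srv-1', 'Load').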
@staticmethod
def _get_long_date_time():
"""Get long date time
Example : Fri 15 May 11:42:39 CEST 2009
:return: long date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%a %d %b %H:%M:%S %Z %Y").decode('UTF-8', 'ignore')
@staticmethod
def _get_short_date_time():
"""Get short date time
Example : 13-10-2000 00:30:28
:return: short date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%d-%m-%Y %H:%M:%S")
@staticmethod
def _get_date():
"""Get date
Example : 13-10-2000
:return: local date
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%d-%m-%Y")
@staticmethod
def _get_time():
"""Get date time
Example : 00:30:28
:return: date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return time.strftime("%H:%M:%S")
@staticmethod
def _get_timet():
"""Get epoch time
Example : 1437143291
:return: timestamp
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
"""
return str(int(time.time()))
def _tot_hosts_by_state(self, state=None, state_type=None):
"""Generic function to get the number of host in the specified state
:param state: state to filter on
:type state: str
:param state_type: state type to filter on (HARD, SOFT)
:type state_type: str
:return: number of hosts in state *state*
:rtype: int
"""
if state is None and state_type is None:
return len(self.hosts)
if state_type:
return sum(1 for h in self.hosts if h.state == state and h.state_type == state_type)
return sum(1 for h in self.hosts if h.state == state)
def _tot_unhandled_hosts_by_state(self, state):
"""Generic function to get the number of unhandled problem hosts in the specified state
:param state: state to filter on
:type state:
:return: number of hosts in state *state* which are not acknowledged problems
:rtype: int
"""
return sum(1 for h in self.hosts if h.state == state and h.state_type == u'HARD' and
h.is_problem and not h.problem_has_been_acknowledged)
def _get_total_hosts(self, state_type=None):
"""
Get the number of hosts
:return: number of hosts
:rtype: int
"""
return self._tot_hosts_by_state(None, state_type=state_type)
def _get_total_hosts_up(self, state_type=None):
"""
Get the number of hosts up
:return: number of hosts
:rtype: int
"""
return self._tot_hosts_by_state(u'UP', state_type=state_type)
def _get_total_hosts_down(self, state_type=None):
"""
Get the number of hosts down
:return: number of hosts
:rtype: int
"""
return self._tot_hosts_by_state(u'DOWN', state_type=state_type)
def _get_total_hosts_down_unhandled(self):
"""
Get the number of down hosts not handled
:return: Number of hosts down and not handled
:rtype: int
"""
return self._tot_unhandled_hosts_by_state(u'DOWN')
def _get_total_hosts_unreachable(self, state_type=None):
"""
Get the number of hosts unreachable
:return: number of hosts
:rtype: int
"""
return self._tot_hosts_by_state(u'UNREACHABLE', state_type=state_type)
def _get_total_hosts_unreachable_unhandled(self):
"""
Get the number of unreachable hosts not handled
:return: Number of hosts unreachable and not handled
:rtype: int
"""
return self._tot_unhandled_hosts_by_state(u'UNREACHABLE')
def _get_total_hosts_problems(self):
"""Get the number of hosts that are a problem
:return: number of hosts with is_problem attribute True
:rtype: int
"""
return sum(1 for h in self.hosts if h.is_problem)
def _get_total_hosts_problems_unhandled(self):
"""
Get the number of host problems not handled
:return: Number of hosts which are problems and not handled
:rtype: int
"""
return sum(1 for h in self.hosts if h.is_problem and not h.problem_has_been_acknowledged)
def _get_total_hosts_problems_handled(self):
"""
Get the number of host problems that are handled (acknowledged)
:return: Number of hosts which are problems and have been acknowledged
:rtype: int
"""
return sum(1 for h in self.hosts if h.is_problem and h.problem_has_been_acknowledged)
def _get_total_hosts_downtimed(self):
"""
Get the number of hosts in a scheduled downtime
:return: Number of hosts which are downtimed
:rtype: int
"""
return sum(1 for h in self.hosts if h.in_scheduled_downtime)
def _get_total_hosts_not_monitored(self):
"""
Get the number of hosts not monitored (active and passive checks disabled)
:return: Number of hosts which are not monitored
:rtype: int
"""
return sum(1 for h in self.hosts if not h.active_checks_enabled and
not h.passive_checks_enabled)
def _get_total_hosts_flapping(self):
"""
Get the number of hosts currently flapping
:return: Number of hosts which are flapping
:rtype: int
"""
return sum(1 for h in self.hosts if h.is_flapping)
def _tot_services_by_state(self, state=None, state_type=None):
"""Generic function to get the number of services in the specified state
:param state: state to filter on
:type state: str
:param state_type: state type to filter on (HARD, SOFT)
:type state_type: str
:return: number of services in state *state*
:rtype: int
TODO: Should be moved
"""
if state is None and state_type is None:
return len(self.services)
if state_type:
return sum(1 for s in self.services if s.state == state and s.state_type == state_type)
return sum(1 for s in self.services if s.state == state)
def _tot_unhandled_services_by_state(self, state):
"""Generic function to get the number of unhandled problem services in the specified state
:param state: state to filter on
:type state:
:return: number of services in state *state* which are not acknowledged problems
:rtype: int
"""
return sum(1 for s in self.services if s.state == state and
s.is_problem and not s.problem_has_been_acknowledged)
def _get_total_services(self, state_type=None):
"""
Get the number of services
:return: number of services
:rtype: int
"""
return self._tot_services_by_state(None, state_type=state_type)
def _get_total_services_ok(self, state_type=None):
"""
Get the number of services ok
:return: number of services
:rtype: int
"""
return self._tot_services_by_state(u'OK', state_type=state_type)
def _get_total_services_warning(self, state_type=None):
"""
Get the number of services warning
:return: number of services
:rtype: int
"""
return self._tot_services_by_state(u'WARNING', state_type=state_type)
def _get_total_services_critical(self, state_type=None):
"""
Get the number of services critical
:return: number of services
:rtype: int
"""
return self._tot_services_by_state(u'CRITICAL', state_type=state_type)
def _get_total_services_unknown(self, state_type=None):
"""
Get the number of services unknown
:return: number of services
:rtype: int
"""
return self._tot_services_by_state(u'UNKNOWN', state_type=state_type)
def _get_total_services_unreachable(self, state_type=None):
"""
Get the number of services unreachable
:return: number of services
:rtype: int
"""
return self._tot_services_by_state(u'UNREACHABLE', state_type=state_type)
def _get_total_services_warning_unhandled(self):
"""
Get the number of warning services not handled
:return: Number of services warning and not handled
:rtype: int
"""
return self._tot_unhandled_services_by_state(u'WARNING')
def _get_total_services_critical_unhandled(self):
"""
Get the number of critical services not handled
:return: Number of services critical and not handled
:rtype: int
"""
return self._tot_unhandled_services_by_state(u'CRITICAL')
def _get_total_services_unknown_unhandled(self):
"""
Get the number of unknown services not handled
:return: Number of services unknown and not handled
:rtype: int
"""
return self._tot_unhandled_services_by_state(u'UNKNOWN')
def _get_total_services_problems(self):
"""Get the number of services that are a problem
:return: number of services with is_problem attribute True
:rtype: int
"""
return sum(1 for s in self.services if s.is_problem)
def _get_total_services_problems_unhandled(self):
"""Get the number of services that are a problem and that are not acknowledged
:return: number of problem services which are not acknowledged
:rtype: int
"""
return sum(1 for s in self.services if s.is_problem and not s.problem_has_been_acknowledged)
def _get_total_services_problems_handled(self):
"""
Get the number of service problems that are handled (acknowledged)
:return: Number of services which are problems and have been acknowledged
:rtype: int
"""
return sum(1 for s in self.services if s.is_problem and s.problem_has_been_acknowledged)
def _get_total_services_downtimed(self):
"""
Get the number of services in a scheduled downtime
:return: Number of services which are downtimed
:rtype: int
"""
return sum(1 for s in self.services if s.in_scheduled_downtime)
def _get_total_services_not_monitored(self):
"""
Get the number of services not monitored (active and passive checks disabled)
:return: Number of services which are not monitored
:rtype: int
"""
return sum(1 for s in self.services if not s.active_checks_enabled and
not s.passive_checks_enabled)
def _get_total_services_flapping(self):
"""
Get the number of services currently flapping
:return: Number of services which are flapping
:rtype: int
"""
return sum(1 for s in self.services if s.is_flapping)
@staticmethod
def _get_process_start_time():
"""DOES NOTHING ( Should get process start time)
This function always returns 'n/a' to inform that it is not available
:return: n/a always
:rtype: str
TODO: Implement this
"""
return 'n/a'
@staticmethod
def _get_events_start_time():
"""DOES NOTHING ( Should get events start time)
This function always returns 'n/a' to inform that it is not available
:return: n/a always
:rtype: str
"""
return 'n/a'
|
class MacroResolver(Borg):
'''MacroResolver class is used to resolve macros (in command call). See above for details'''
def init(self, conf):
'''Initialize MacroResolver instance with conf.
Must be called at least once.
:param conf: configuration to load
:type conf: alignak.objects.Config
:return: None
'''
pass
@staticmethod
def _get_macros(chain):
'''Get all macros of a chain
Cut '$' char and create a dict with the following structure::
{ 'MacroSTR1' : {'val': '', 'type': 'unknown'}
'MacroSTR2' : {'val': '', 'type': 'unknown'}
}
:param chain: chain to parse
:type chain: str
:return: dict with macro parsed as key
:rtype: dict
'''
pass
def _get_value_from_element(self, elt, prop):
'''Get value from an element's property.
the property may be a function to call.
If the property is not resolved (because not implemented), this function will return 'n/a'
:param elt: element
:type elt: object
:param prop: element property
:type prop: str
:return: getattr(elt, prop) or getattr(elt, prop)() (call)
:rtype: str
'''
pass
def _delete_unwanted_caracters(self, chain):
'''Remove not wanted char from chain
unwanted char are illegal_macro_output_chars attribute
:param chain: chain to remove char from
:type chain: str
:return: chain cleaned
:rtype: str
'''
pass
def get_env_macros(self, data):
'''Get all environment macros from data
For each object in data ::
* Fetch all macros in object.__class__.macros
* Fetch all customs macros in o.custom
:param data: data to get macro
:type data:
:return: dict with macro name as key and macro value as value
:rtype: dict
'''
pass
def resolve_simple_macros_in_string(self, c_line, data, macromodulations, timeperiods,
args=None):
'''Replace macro in the command line with the real value
:param c_line: command line to modify
:type c_line: str
:param data: objects list, used to look for a specific macro
:type data:
:param macromodulations: the available macro modulations
:type macromodulations: dict
:param timeperiods: the available timeperiods
:type timeperiods: dict
:param args: args given to the command line, used to get "ARGN" macros.
:type args:
:return: command line with '$MACRO$' replaced with values
:rtype: str
'''
pass
def resolve_command(self, com, data, macromodulations, timeperiods):
'''Resolve command macros with data
:param com: check / event handler or command call object
:type com: object
:param data: objects list, used to search for a specific macro (custom or object related)
:type data:
:param macromodulations: the available macro modulations
:type macromodulations: dict
:param timeperiods: the available timeperiods
:type timeperiods: dict
:return: command line with '$MACRO$' replaced with values
:rtype: str
'''
pass
@staticmethod
def _get_type_of_macro(macros, objs):
'''Set macros types
Example::
ARG\d -> ARGN,
HOSTBLABLA -> class one and set Host in class)
_HOSTTOTO -> HOST CUSTOM MACRO TOTO
SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1
:param macros: macros list in a dictionary
:type macros: dict
:param objs: objects list, used to tag object macros
:type objs: list
:return: None
'''
pass
@staticmethod
def _resolve_argn(macro, args):
'''Get argument from macro name
ie : $ARG3$ -> args[2]
:param macro: macro to parse
:type macro:
:param args: args given to command line
:type args:
:return: argument at position N-1 in args table (where N is the int parsed)
:rtype: None | str
'''
pass
def _resolve_ondemand(self, macro, data):
'''Get on demand macro value
If the macro cannot be resolved, this function will return 'n/a' rather than
an empty string, so as to alert the caller of a potential problem.
:param macro: macro to parse
:type macro:
:param data: data to get value from
:type data:
:return: macro value
:rtype: str
'''
pass
@staticmethod
def _get_long_date_time():
'''Get long date time
Example : Fri 15 May 11:42:39 CEST 2009
:return: long date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
'''
pass
@staticmethod
def _get_short_date_time():
'''Get short date time
Example : 13-10-2000 00:30:28
:return: short date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
'''
pass
@staticmethod
def _get_date():
'''Get date
Example : 13-10-2000
:return: local date
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
'''
pass
@staticmethod
def _get_time():
'''Get date time
Example : 00:30:28
:return: date local time
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
'''
pass
@staticmethod
def _get_timet():
'''Get epoch time
Example : 1437143291
:return: timestamp
:rtype: str
TODO: Should be moved to util
TODO: Should consider timezone
'''
pass
def _tot_hosts_by_state(self, state=None, state_type=None):
'''Generic function to get the number of hosts in the specified state
:param state: state to filter on
:type state: str
:param state_type: state type to filter on (HARD, SOFT)
:type state_type: str
:return: number of hosts in state *state*
:rtype: int
'''
pass
def _tot_unhandled_hosts_by_state(self, state):
'''Generic function to get the number of unhandled problem hosts in the specified state
:param state: state to filter on
:type state:
:return: number of hosts in state *state* which are not acknowledged problems
:rtype: int
'''
pass
def _get_total_hosts(self, state_type=None):
'''
Get the number of hosts
:return: number of hosts
:rtype: int
'''
pass
def _get_total_hosts_up(self, state_type=None):
'''
Get the number of hosts up
:return: number of hosts
:rtype: int
'''
pass
def _get_total_hosts_down(self, state_type=None):
'''
Get the number of hosts down
:return: number of hosts
:rtype: int
'''
pass
def _get_total_hosts_down_unhandled(self):
'''
Get the number of down hosts not handled
:return: Number of hosts down and not handled
:rtype: int
'''
pass
def _get_total_hosts_unreachable(self, state_type=None):
'''
Get the number of hosts unreachable
:return: number of hosts
:rtype: int
'''
pass
def _get_total_hosts_unreachable_unhandled(self):
'''
Get the number of unreachable hosts not handled
:return: Number of hosts unreachable and not handled
:rtype: int
'''
pass
def _get_total_hosts_problems(self):
'''Get the number of hosts that are a problem
:return: number of hosts with is_problem attribute True
:rtype: int
'''
pass
def _get_total_hosts_problems_unhandled(self):
'''
Get the number of host problems not handled
:return: Number of hosts which are problems and not handled
:rtype: int
'''
pass
def _get_total_hosts_problems_handled(self):
'''
Get the number of host problems that are handled (acknowledged)
:return: Number of hosts which are problems and have been acknowledged
:rtype: int
'''
pass
def _get_total_hosts_downtimed(self):
'''
Get the number of hosts in a scheduled downtime
:return: Number of hosts which are downtimed
:rtype: int
'''
pass
def _get_total_hosts_not_monitored(self):
'''
Get the number of hosts not monitored (active and passive checks disabled)
:return: Number of hosts which are not monitored
:rtype: int
'''
pass
def _get_total_hosts_flapping(self):
'''
Get the number of hosts currently flapping
:return: Number of hosts which are flapping
:rtype: int
'''
pass
def _tot_services_by_state(self, state=None, state_type=None):
'''Generic function to get the number of services in the specified state
:param state: state to filter on
:type state: str
:param state_type: state type to filter on (HARD, SOFT)
:type state_type: str
:return: number of services in state *state*
:rtype: int
TODO: Should be moved
'''
pass
def _tot_unhandled_services_by_state(self, state):
'''Generic function to get the number of unhandled problem services in the specified state
:param state: state to filter on
:type state:
:return: number of services in state *state* which are not acknowledged problems
:rtype: int
'''
pass
def _get_total_services(self, state_type=None):
'''
Get the number of services
:return: number of services
:rtype: int
'''
pass
def _get_total_services_ok(self, state_type=None):
'''
Get the number of services ok
:return: number of services
:rtype: int
'''
pass
def _get_total_services_warning(self, state_type=None):
'''
Get the number of services warning
:return: number of services
:rtype: int
'''
pass
def _get_total_services_critical(self, state_type=None):
'''
Get the number of services critical
:return: number of services
:rtype: int
'''
pass
def _get_total_services_unknown(self, state_type=None):
'''
Get the number of services unknown
:return: number of services
:rtype: int
'''
pass
def _get_total_services_unreachable(self, state_type=None):
'''
Get the number of services unreachable
:return: number of services
:rtype: int
'''
pass
def _get_total_services_warning_unhandled(self):
'''
Get the number of warning services not handled
:return: Number of services warning and not handled
:rtype: int
'''
pass
def _get_total_services_critical_unhandled(self):
'''
Get the number of critical services not handled
:return: Number of services critical and not handled
:rtype: int
'''
pass
def _get_total_services_unknown_unhandled(self):
'''
Get the number of unknown services not handled
:return: Number of services unknown and not handled
:rtype: int
'''
pass
def _get_total_services_problems(self):
'''Get the number of services that are a problem
:return: number of services with is_problem attribute True
:rtype: int
'''
pass
def _get_total_services_problems_unhandled(self):
'''Get the number of services that are a problem and that are not acknowledged
:return: number of problem services which are not acknowledged
:rtype: int
'''
pass
def _get_total_services_problems_handled(self):
'''
Get the number of service problems that are handled (acknowledged)
:return: Number of services which are problems and have been acknowledged
:rtype: int
'''
pass
def _get_total_services_downtimed(self):
'''
Get the number of services in a scheduled downtime
:return: Number of services which are downtimed
:rtype: int
'''
pass
def _get_total_services_not_monitored(self):
'''
Get the number of services not monitored (active and passive checks disabled)
:return: Number of services which are not monitored
:rtype: int
'''
pass
def _get_total_services_flapping(self):
'''
Get the number of services currently flapping
:return: Number of services which are flapping
:rtype: int
'''
pass
@staticmethod
def _get_process_start_time():
'''DOES NOTHING ( Should get process start time)
This function always returns 'n/a' to inform that it is not available
:return: n/a always
:rtype: str
TODO: Implement this
'''
pass
@staticmethod
def _get_events_start_time():
'''DOES NOTHING ( Should get events start time)
This function always returns 'n/a' to inform that it is not available
:return: n/a always
:rtype: str
'''
pass
| 59 | 49 | 16 | 2 | 6 | 8 | 2 | 1.02 | 1 | 8 | 0 | 0 | 38 | 12 | 48 | 49 | 892 | 134 | 377 | 138 | 317 | 384 | 290 | 111 | 241 | 22 | 2 | 6 | 114 |
4,138 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/message.py
|
alignak.message.Message
|
class Message(object):
"""This is a simple message class for communications between actionners and workers"""
# Auto generated identifiers
_id = 0
my_type = 'message'
_type = None
_data = None
_source = None
def __init__(self, _type, data=None, source=None):
cls = self.__class__
self._id = cls._id
cls._id += 1
self._type = _type
self._data = data
self._source = source
def get_type(self):
"""Getter of _type attribute
:return: Message type
:rtype: str
"""
return self._type
def get_data(self):
"""Getter of _data attribute
:return: Message data
:rtype: str
"""
return self._data
def get_source(self):
"""Getter of _source attribute
:return: Message from (actionner/worker name)
:rtype: str
"""
return self._source
def __str__(self): # pragma: no cover
"""String representation of message
:return: source - id, type: %s, data: %s
:rtype: str
"""
return "%s - %s, type: %s, data: %s" % (self._source, self._id, self._type, self._data)
|
class Message(object):
'''This is a simple message class for communications between actionners and workers'''
def __init__(self, _type, data=None, source=None):
pass
def get_type(self):
'''Getter of _type attribute
:return: Message type
:rtype: str
'''
pass
def get_data(self):
'''Getter of _data attribute
:return: Message data
:rtype: str
'''
pass
def get_source(self):
'''Getter of _source attribute
:return: Message from (actionner/worker name)
:rtype: str
'''
pass
def __str__(self):
'''String representation of message
:return: source - id, type: %s, data: %s
:rtype: str
'''
pass
| 6 | 5 | 7 | 1 | 3 | 3 | 1 | 0.9 | 1 | 0 | 0 | 0 | 5 | 1 | 5 | 5 | 51 | 12 | 21 | 13 | 15 | 19 | 21 | 13 | 15 | 1 | 1 | 0 | 5 |
4,139 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_hostgroup.py
|
tests.test_hostgroup.TestHostGroup
|
class TestHostGroup(AlignakTest):
"""
This class tests the hostgroups
"""
def setUp(self):
super(TestHostGroup, self).setUp()
def test_hostgroup(self):
"""
Default configuration has no loading problems and its hostgroups are parsed correctly
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg')
assert self.conf_is_correct
def test_multiple_hostgroup_definition(self):
"""
No error when the same group is defined twice in a host/service or
when a host/service is defined twice in a group
:return: None
"""
self.setup_with_file('cfg/hostgroup/multiple_hostgroup.cfg')
print("Get the hosts and services")
host = self._arbiter.conf.hosts.find_by_name("will crash")
assert host is not None
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"will crash", "Crash")
assert svc is not None
grp = self._arbiter.conf.hostgroups.find_by_name("hg-sample")
assert grp is not None
assert host.uuid in grp.members
grp = self._arbiter.conf.servicegroups.find_by_name("Crashed")
assert grp is not None
assert svc.uuid in grp.members
def test_multiple_not_hostgroup_definition(self):
"""
No error when the same group is defined twice in a host/service
:return: None
"""
self.setup_with_file('cfg/hostgroup/multiple_not_hostgroup.cfg')
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"hst_in_BIG", "THE_SERVICE")
assert svc is not None
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"hst_in_IncludeLast", "THE_SERVICE")
assert svc is not None
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"hst_in_NotOne", "THE_SERVICE")
# Not present!
assert svc is None
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"hst_in_NotTwo", "THE_SERVICE")
# Not present!
assert svc is None
def test_bad_hostgroup(self):
""" Test bad hostgroups in the configuration
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/cfg_bad_hostgroup.cfg')
# Configuration is not ok
assert not self.conf_is_correct
# 4 error messages, bad hostgroup member
assert len(self.configuration_errors) == 4
# No warning messages
assert len(self.configuration_warnings) == 0
# Error is an unknown member in a group (\ escape the [ and ' ...)
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::allhosts_bad] as hostgroup, got unknown member \'BAD_HOST\'"
))
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::allhosts_bad] Configuration is incorrect; from: "
))
self.assert_any_cfg_log_match(re.escape(
"The hostgroup 'allhosts_bad_realm' is affected to an unknown realm: 'Unknown'"
))
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::allhosts_bad] as hostgroup, got unknown member 'BAD_HOST'"
))
self.assert_any_cfg_log_match(
"hostgroups configuration is incorrect!"
)
def test_look_for_alias(self):
""" Hostgroups alias
:return: None
"""
self.setup_with_file('cfg/hostgroup/alignak_groups_with_no_alias.cfg')
# Found a hostgroup named NOALIAS
hg = self._arbiter.conf.hostgroups.find_by_name("NOALIAS")
assert isinstance(hg, Hostgroup)
assert hg.get_name() == "NOALIAS"
assert hg.alias == ""
def test_hostgroup_members(self):
""" Test if members are linked from group
:return: None
"""
self.setup_with_file('cfg/hostgroup/alignak_hostgroup_members.cfg')
# Found a hostgroup named allhosts_and_groups
hg = self._arbiter.conf.hostgroups.find_by_name("allhosts_and_groups")
assert isinstance(hg, Hostgroup)
assert hg.get_name() == "allhosts_and_groups"
assert len(self._arbiter.conf.hostgroups.get_members_of_group("allhosts_and_groups")) == \
2
assert len(hg.hostgroup_members) == 4
assert len(hg.get_hostgroup_members()) == 4
assert len(hg.get_hosts()) == 2
def test_members_hostgroup(self):
""" Test if group is linked from the member
:return: None
"""
self.setup_with_file('cfg/hostgroup/alignak_hostgroup_members.cfg')
# Found a hostgroup named allhosts_and_groups
hg = self._arbiter.conf.hostgroups.find_by_name("allhosts_and_groups")
assert isinstance(hg, Hostgroup)
assert hg.get_name() == "allhosts_and_groups"
assert len(self._arbiter.conf.hostgroups.get_members_of_group("allhosts_and_groups")) == 2
assert len(hg.get_hosts()) == 2
print("List hostgroup hosts:")
for host_id in hg.members:
host = self._arbiter.conf.hosts[host_id]
print(("Host: %s" % host))
assert isinstance(host, Host)
if host.get_name() == 'test_router_0':
assert len(host.get_hostgroups()) == 3
for group_id in host.hostgroups:
group = self._arbiter.conf.hostgroups[group_id]
print(("Group: %s" % group))
assert group.get_name() in [
'router', 'allhosts', 'allhosts_and_groups'
]
if host.get_name() == 'test_host_0':
assert len(host.get_hostgroups()) == 4
for group_id in host.hostgroups:
group = self._arbiter.conf.hostgroups[group_id]
print(("Group: %s" % group))
assert group.get_name() in [
'allhosts', 'allhosts_and_groups', 'up', 'hostgroup_01'
]
assert len(hg.get_hostgroup_members()) == 4
print("List hostgroup groups:")
for group in hg.get_hostgroup_members():
print(("Group: %s" % group))
assert group in [
'hostgroup_01', 'hostgroup_02', 'hostgroup_03', 'hostgroup_04'
]
def test_hostgroup_with_no_host(self):
""" Allow hostgroups with no hosts
:return: None
"""
self.setup_with_file('cfg/hostgroup/alignak_hostgroup_no_host.cfg')
# Found a hostgroup named void
hg = self._arbiter.conf.hostgroups.find_by_name("void")
assert isinstance(hg, Hostgroup)
assert hg.get_name() == "void"
assert len(self._arbiter.conf.hostgroups.get_members_of_group("void")) == 0
assert len(hg.get_hostgroup_members()) == 0
assert len(hg.get_hosts()) == 0
def test_hostgroup_with_space(self):
""" Test that hostgroups can have a name with spaces
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg')
self.nb_hostgroups = len(self._arbiter.conf.hostgroups)
self.setup_with_file('cfg/hostgroup/alignak_hostgroup_with_space.cfg')
# Two more groups than the default configuration
assert len(self._arbiter.conf.hostgroups) == self.nb_hostgroups + 2
assert self._arbiter.conf.hostgroups.find_by_name("test_With Spaces").get_name() == \
"test_With Spaces"
assert self._arbiter.conf.hostgroups.get_members_of_group(
"test_With Spaces") is not []
assert self._arbiter.conf.hostgroups.find_by_name("test_With another Spaces").get_name() == \
"test_With another Spaces"
assert self._arbiter.conf.hostgroups.get_members_of_group(
"test_With another Spaces") is not []
def test_service_hostgroup(self):
"""Test hosts services inherited from a hostgroups property in service definition
:return: None
"""
self.setup_with_file('cfg/hostgroup/hostgroups_from_service.cfg')
# Search a hostgroup named tcp_hosts
hg = self._arbiter.conf.hostgroups.find_by_name("tcp_hosts")
assert isinstance(hg, Hostgroup)
assert len(self._arbiter.conf.hostgroups.get_members_of_group("tcp_hosts")) == 3
assert len(hg.members) == 3
assert len(hg.hostgroup_members) == 0
assert len(hg.get_hosts()) == 3
print("Hostgroup hosts:")
for host_id in hg.members:
host = self._arbiter.conf.hosts[host_id]
print("- host: %s" % host.get_name())
assert len(host.services) > 0
for service_uuid in host.services:
service = self._arbiter.conf.services[service_uuid]
print(" has a service: %s" % service.get_name())
assert 'TCP' == service.get_name()
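# Illustrative configuration sketch for the hostgroup/service relation tested
# above (hypothetical fixture content, standard Nagios-like syntax):
#   define hostgroup {
#       hostgroup_name  tcp_hosts
#       members         host_1, host_2, host_3
#   }
#   define service {
#       service_description  TCP
#       hostgroup_name       tcp_hosts   ; creates the service on every member
#       use                  generic-service
#   }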
|
class TestHostGroup(AlignakTest):
'''
This class tests the hostgroups
'''
def setUp(self):
pass
def test_hostgroup(self):
'''
Default configuration has no loading problems and its hostgroups are parsed correctly
:return: None
'''
pass
def test_multiple_hostgroup_definition(self):
'''
No error when the same group is defined twice in a host/service or
when a host/service is defined twice in a group
:return: None
'''
pass
def test_multiple_not_hostgroup_definition(self):
'''
No error when the same group is defined twice in a host/service
:return: None
'''
pass
def test_bad_hostgroup(self):
''' Test bad hostgroups in the configuration
:return: None
'''
pass
def test_look_for_alias(self):
''' Hostgroups alias
:return: None
'''
pass
def test_hostgroup_members(self):
''' Test if members are linked from group
:return: None
'''
pass
def test_members_hostgroup(self):
''' Test if group is linked from the member
:return: None
'''
pass
def test_hostgroup_with_no_host(self):
''' Allow hostgroups with no hosts
:return: None
'''
pass
def test_hostgroup_with_space(self):
''' Test that hostgroups can have a name with spaces
:return: None
'''
pass
def test_service_hostgroup(self):
'''Test host services inherited from a hostgroups property in the service definition
:return: None
'''
pass
| 12 | 11 | 20 | 3 | 13 | 4 | 2 | 0.34 | 1 | 2 | 0 | 0 | 11 | 1 | 11 | 66 | 237 | 44 | 144 | 30 | 132 | 49 | 118 | 30 | 106 | 7 | 2 | 3 | 19 |
4,140 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_illegal_names.py
|
tests.test_illegal_names.TestIllegalNames
|
class TestIllegalNames(AlignakTest):
"""
This class tests illegal characters in the configuration
"""
def setUp(self):
super(TestIllegalNames, self).setUp()
def test_illegal_character_in_names(self):
""" Test illegal characters in host_name
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg')
illegal_characts = self._arbiter.conf.illegal_object_name_chars
print("Illegal characters: %s" % illegal_characts)
host = self._arbiter.conf.hosts.find_by_name("test_host_0")
print("Host: %s" % host.__dict__)
# should be correct
assert host.is_correct()
# Now change the name with an illegal character
host.configuration_errors = []
for charact in illegal_characts:
host.host_name = 'test_host_0' + charact
# and now the host must be reported as incorrect
assert not host.is_correct()
print(host.configuration_errors)
assert host.configuration_errors == [
'[host::test_host_0`] host_name contains an illegal character: `',
'[host::test_host_0~] host_name contains an illegal character: ~',
'[host::test_host_0!] host_name contains an illegal character: !',
'[host::test_host_0$] host_name contains an illegal character: $',
'[host::test_host_0%] host_name contains an illegal character: %',
'[host::test_host_0^] host_name contains an illegal character: ^',
'[host::test_host_0&] host_name contains an illegal character: &',
'[host::test_host_0*] host_name contains an illegal character: *',
'[host::test_host_0"] host_name contains an illegal character: "',
'[host::test_host_0|] host_name contains an illegal character: |',
"[host::test_host_0'] host_name contains an illegal character: '",
'[host::test_host_0<] host_name contains an illegal character: <',
'[host::test_host_0>] host_name contains an illegal character: >',
'[host::test_host_0?] host_name contains an illegal character: ?',
'[host::test_host_0,] host_name contains an illegal character: ,',
'[host::test_host_0(] host_name contains an illegal character: (',
'[host::test_host_0)] host_name contains an illegal character: )',
'[host::test_host_0=] host_name contains an illegal character: =']
# test special cases manually to be sure
host.configuration_errors = []
for charact in ['!']:
host.host_name = 'test_host_0' + charact
# and now the host must be reported as incorrect
assert not host.is_correct()
assert host.configuration_errors == [
'[host::test_host_0!] host_name contains an illegal character: !'
]
|
class TestIllegalNames(AlignakTest):
'''
This class tests illegal characters in the configuration
'''
def setUp(self):
pass
def test_illegal_character_in_names(self):
''' Test illegal characters in host_name
:return: None
'''
pass
| 3 | 2 | 26 | 2 | 20 | 4 | 2 | 0.27 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 56 | 4 | 41 | 6 | 38 | 11 | 21 | 6 | 18 | 3 | 2 | 1 | 4 |
4,141 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_inheritance_and_plus.py
|
tests.test_inheritance_and_plus.TestInheritanceAndPlus
|
class TestInheritanceAndPlus(AlignakTest):
def setUp(self):
super(TestInheritanceAndPlus, self).setUp()
def test_inheritance(self):
"""Test properties inheritance
"""
self.setup_with_file('cfg/cfg_inheritance.cfg')
assert self.conf_is_correct
print("Hosts: ")
pprint(self._arbiter.conf.hosts.__dict__)
for host in self._arbiter.conf.hosts:
print("Host conf: %d / %s\n" % (host.max_check_attempts, host))
print("Services: ")
pprint(self._arbiter.conf.services.__dict__)
print("Contacts: ")
pprint(self._arbiter.conf.contacts.__dict__)
# common objects
tp_24x7 = self._arbiter.conf.timeperiods.find_by_name("24x7")
tp_none = self._arbiter.conf.timeperiods.find_by_name("none")
tp_work = self._arbiter.conf.timeperiods.find_by_name("work")
cgtest = self._arbiter.conf.contactgroups.find_by_name("test_contact")
cgadm = self._arbiter.conf.contactgroups.find_by_name("admins")
cmdsvc = self._arbiter.conf.commands.find_by_name("check_service")
cmdtest = self._arbiter.conf.commands.find_by_name("dummy_command")
# Checks we got the objects we need
assert tp_24x7 is not None
assert tp_work is not None
assert cgtest is not None
assert cgadm is not None
assert cmdsvc is not None
assert cmdtest is not None
# Contacts
c_admin = self._arbiter.conf.contacts.find_by_name("admin")
assert c_admin is not None
# admin inherits from a generic-contact
print(c_admin.tags)
assert c_admin.tags == ['generic-contact']
assert c_admin.email == 'alignak@localhost'
assert c_admin.host_notifications_enabled is True
assert c_admin.service_notifications_enabled is True
c_not_notified = self._arbiter.conf.contacts.find_by_name("no_notif")
assert c_not_notified is not None
# no_notif inherits from a not-notified
print(c_not_notified.tags)
assert 'generic-contact' in c_not_notified.tags
assert 'not_notified' in c_not_notified.tags
assert c_not_notified.email == 'none'
# Issue #1024 - contact templates inheritance
assert c_not_notified.host_notifications_enabled is False
assert c_not_notified.service_notifications_enabled is False
# Hosts
test_host_0 = self._arbiter.conf.hosts.find_by_name("test_host_0")
assert test_host_0 is not None
test_router_0 = self._arbiter.conf.hosts.find_by_name("test_router_0")
assert test_router_0 is not None
hst1 = self._arbiter.conf.hosts.find_by_name("test_host_01")
assert hst1 is not None
assert 'srv' in hst1.tags
assert 'generic-host' in hst1.tags
assert hst1.check_period == tp_none.uuid
hst2 = self._arbiter.conf.hosts.find_by_name("test_host_02")
assert hst2 is not None
assert hst2.check_period == tp_work.uuid
# Services
# svc1 = self._arbiter.conf.services.find_by_name("test_host_01/srv-svc")
# svc2 = self._arbiter.conf.services.find_by_name("test_host_02/srv-svc")
# assert svc1 is not None
# assert svc2 is not None
# Inherited services (through hostgroup property)
# Those services are attached to all hosts of a hostgroup and they both
# inherit from the srv-from-hostgroup template
svc12 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01",
"srv-from-hostgroup")
assert svc12 is not None
# business_impact inherited
assert svc12.business_impact == 5
# maintenance_period none inherited from the service template
assert svc12.maintenance_period == tp_24x7.uuid
# assert svc12.use == ['generic-service']
# Todo: explain why we do not have generic-service in tags ...
assert svc12.tags == []
svc22 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02",
"srv-from-hostgroup")
# business_impact inherited
assert svc22.business_impact == 5
# maintenance_period none inherited from the service template
assert svc22.maintenance_period == tp_24x7.uuid
assert svc22 is not None
assert svc22.use == ['generic-service']
assert svc22.tags == []
# maintenance_period none inherited...
assert svc22.maintenance_period == tp_24x7.uuid
# Duplicate for each services (generic services for each host inheriting from srv template)
svc1proc1 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01", "proc proc1")
svc1proc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_01", "proc proc2")
svc2proc1 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02", "proc proc1")
svc2proc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_02", "proc proc2")
assert svc1proc1 is not None
assert svc1proc2 is not None
assert svc2proc1 is not None
assert svc2proc2 is not None
def test_inheritance_and_plus(self):
"""Test properties inheritance with + sign
"""
self.setup_with_file('cfg/cfg_inheritance_and_plus.cfg')
assert self.conf_is_correct
self._sched = self._scheduler
# Get the hostgroups
servers = self._arbiter.conf.hostgroups.find_by_name('servers')
assert servers is not None
linux = self._arbiter.conf.hostgroups.find_by_name('linux')
assert linux is not None
dmz = self._arbiter.conf.hostgroups.find_by_name('DMZ')
assert dmz is not None
mysql = self._arbiter.conf.hostgroups.find_by_name('mysql')
assert mysql is not None
# Get the hosts
host1 = self._arbiter.conf.hosts.find_by_name("test-server1")
host2 = self._arbiter.conf.hosts.find_by_name("test-server2")
# HOST 1 is using templates: linux-servers,dmz, so it should be in
# the hostgroups named "linux" AND "DMZ"
assert len(host1.hostgroups) == 3
assert servers.uuid in host1.hostgroups
assert linux.uuid in host1.hostgroups
assert dmz.uuid in host1.hostgroups
assert mysql.uuid not in host1.hostgroups
# HOST2 is using templates linux-servers,dmz and is hostgroups +mysql,
# so it should be in all three hostgroups
assert linux.uuid in host2.hostgroups
assert dmz.uuid in host2.hostgroups
assert mysql.uuid in host2.hostgroups
# Get the servicegroups
generic = self._arbiter.conf.servicegroups.find_by_name('generic-sg')
assert generic is not None
another = self._arbiter.conf.servicegroups.find_by_name('another-sg')
assert another is not None
# Get the service
service = self._arbiter.conf.services.find_srv_by_name_and_hostname("pack-host", 'CHILDSERV')
assert service is not None
# The service inherits from a template with a service group and it has
# its own +servicegroup so it should be in both groups
assert generic.uuid in service.servicegroups
assert another.uuid in service.servicegroups
# Get another service, built by host/service templates relation
service = self._arbiter.conf.services.find_srv_by_name_and_hostname('pack-host', 'CHECK-123')
assert service is not None
# The service should have inherited the custom variable `_CUSTOM_123` because custom
# variables are always stored in upper case
assert '_CUSTOM_123' in service.customs
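# Illustrative sketch of the '+' inheritance syntax exercised above
# (hypothetical fixture content, standard Nagios-like syntax):
#   define host {
#       host_name   test-server2
#       use         linux-servers,dmz
#       hostgroups  +mysql   ; '+' appends to the hostgroups inherited from
#   }                        ; the templates instead of replacing them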
|
class TestInheritanceAndPlus(AlignakTest):
def setUp(self):
pass
def test_inheritance(self):
'''Test properties inheritance
'''
pass
def test_inheritance_and_plus(self):
'''Test properties inheritance with + sign
'''
pass
| 4 | 2 | 58 | 9 | 36 | 14 | 1 | 0.38 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 58 | 179 | 29 | 109 | 34 | 105 | 41 | 107 | 34 | 103 | 2 | 2 | 1 | 4 |
4,142 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_internal_checks.py
|
tests.test_internal_checks.TestInternalChecks
|
class TestInternalChecks(AlignakTest):
"""
This class tests internal checks
"""
def setUp(self):
super(TestInternalChecks, self).setUp()
os.environ['ALIGNAK_LOG_CHECKS'] = '1'
def tearDown(self):
del os.environ['ALIGNAK_LOG_CHECKS']
def test_internal_checks(self):
""" Test many internal checks
:return: None
"""
self._run_internal_checks(perf_data=False)
def test_internal_checks_perf_data(self):
""" Test many internal checks with some random performance data
:return: None
"""
self._run_internal_checks(perf_data=True)
def _run_internal_checks(self, perf_data=False):
""" Test many internal checks
:return: None
"""
if 'ALIGNAK_INTERNAL_HOST_PERFDATA' in os.environ:
del os.environ['ALIGNAK_INTERNAL_HOST_PERFDATA']
if 'ALIGNAK_INTERNAL_SERVICE_PERFDATA' in os.environ:
del os.environ['ALIGNAK_INTERNAL_SERVICE_PERFDATA']
# Set environment variables that define a [0 - N] random range for the performance data
if perf_data:
os.environ['ALIGNAK_INTERNAL_HOST_PERFDATA'] = '5'
os.environ['ALIGNAK_INTERNAL_SERVICE_PERFDATA'] = '5'
self.setup_with_file('cfg/cfg_internal_checks.cfg',
dispatching=True)
assert self.conf_is_correct
assert self._scheduler.pushed_conf.log_active_checks is True
host = self._scheduler.hosts.find_by_name("host_6")
assert host.check_interval == 5 # 5 minutes!
assert host.state == 'UP'
assert host.state_id == 0
assert host.last_state == 'PENDING'
assert host.last_state_id == 0
assert host.last_state_change == 0
assert host.last_state_update == 0
assert host.last_hard_state == 'PENDING'
assert host.last_hard_state_id == 0
assert host.last_hard_state_change == 0
# Freeze the time !
initial_datetime = datetime.datetime(year=2018, month=6, day=1,
hour=18, minute=30, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
# 1527877800
now = time.time()
self.scheduler_loop(1)
time.sleep(0.1)
self.show_checks()
print("Checks list:")
checks = list(self._scheduler.checks.values())
for check in checks:
if check.command.startswith("/test"):
continue
print("%s: %s" % (datetime.datetime.utcfromtimestamp(check.t_to_go).
strftime('%Y-%m-%d %H:%M:%S'), check.command))
assert check.creation_time == now
assert check.t_to_go >= now
assert check.t_to_go <= now + (5 * 60)
print("-----\nChecks execution:")
self.clear_logs()
checks_count = len(checks)
# Simulate checks for twenty minutes (1200 seconds)
for second in range(0, 1200):
# Time warp 1 second
frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
self.scheduler_loop(1)
if (second % 300) == 0:
print("5 minutes later...")
print("-----")
print("Checks list:")
checks = list(self._scheduler.checks.values())
for check in checks:
if check.command.startswith("/test"):
continue
print("Check: %s" % check)
print("%s: %s - %s"
% (datetime.datetime.utcfromtimestamp(check.t_to_go).
strftime('%Y-%m-%d %H:%M:%S'), check.command, check.perf_data))
if check.command.startswith('_internal') and check.status not in ['scheduled']:
if perf_data:
assert check.perf_data != ''
else:
assert check.perf_data == ''
# The Alignak log contain checks log thanks to the ALIGNAK_LOG_CHECKS env variable!
# self.show_logs()
# self.show_events()
print("Logs:")
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for handler in logger_.handlers:
if not isinstance(handler, CollectorHandler):
continue
for log in handler.collector:
if 'DEBUG:' in log:
continue
if 'next check for ' in log:
continue
print("l: %s" % log)
# Always UP
if 'Internal check: host_0 ' in log:
assert '--ALC-- Internal check: host_0 - ' \
'_internal_host_check;0;I am always Up' in log
continue
if 'check result for host_0,' in log:
assert '--ALC-- check result for host_0, ' \
'exit: 0, output: I am always Up' in log
continue
# Always DOWN
if 'Internal check: host_1 ' in log:
assert '--ALC-- Internal check: host_1 - ' \
'_internal_host_check;1;I am always Down' in log
continue
if 'check result for host_1,' in log:
assert '--ALC-- check result for host_1, ' \
'exit: 1, output: I am always Down' in log
continue
# Always DOWN
if 'Internal check: host_2 ' in log:
assert '--ALC-- Internal check: host_2 - ' \
'_internal_host_check;2;I am always Down' in log
continue
if 'check result for host_2,' in log:
# state_id is 2 or 1 for a host
assert '--ALC-- check result for host_2, ' \
'exit: 2, output: I am always Down' in log \
or '--ALC-- check result for host_2, exit: 1, output: I am always Down' in log
continue
# Always UNKNOWN
if 'Internal check: host_3 ' in log:
assert '--ALC-- Internal check: host_3 - ' \
'_internal_host_check;3;I am always Unknown' in log
continue
if 'check result for host_3,' in log:
# state_id is 2 or 1 for a host
assert '--ALC-- check result for host_3, ' \
'exit: 3, output: I am always Unknown' in log \
or '--ALC-- check result for host_3, exit: 1, output: ' \
'I am always Unknown' in log
continue
# Always UNREACHABLE
if 'Internal check: host_4 ' in log:
assert '--ALC-- Internal check: host_4 - ' \
'_internal_host_check;4;I am always Unreachable' in log
continue
if 'check result for host_4,' in log:
assert '--ALC-- check result for host_4, ' \
'exit: 4, output: I am always Unreachable' in log
continue
# Output built by Alignak
if 'Internal check: host_5 ' in log:
assert '--ALC-- Internal check: host_5 - _internal_host_check;0;' in log
continue
if 'check result for host_5,' in log:
assert '--ALC-- check result for host_5, exit: 0, output: ' \
'Host internal check result: 0' in log
continue
# Random exit code
if 'check result for host_6,' in log:
assert \
('--ALC-- check result for host_6, exit: 0, output: '
'Host internal check result: 0' in log) or \
('--ALC-- check result for host_6, exit: 1, output: '
'Host internal check result: 1' in log) or \
('--ALC-- check result for host_6, exit: 2, output: '
'Host internal check result: 2' in log) or \
('--ALC-- check result for host_6, exit: 3, output: '
'Host internal check result: 3' in log)
continue
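# Illustrative sketch of an internal check command definition (hypothetical
# fixture content): the check is evaluated by the scheduler itself, without
# forking a plugin, and the exit code/output are taken from the command:
#   define host {
#       host_name       host_0
#       check_command   _internal_host_check;0;I am always Up
#   }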
|
class TestInternalChecks(AlignakTest):
'''
This class tests internal checks
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_internal_checks(self):
''' Test many internal checks
:return: None
'''
pass
def test_internal_checks_perf_data(self):
''' Test many internal checks with some random performance data
:return: None
'''
pass
def _run_internal_checks(self, perf_data=False):
''' Test many internal checks
:return: None
'''
pass
| 6 | 4 | 40 | 6 | 29 | 5 | 7 | 0.21 | 1 | 6 | 1 | 0 | 5 | 0 | 5 | 60 | 207 | 32 | 146 | 17 | 139 | 30 | 118 | 16 | 112 | 30 | 2 | 3 | 34 |
4,143 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_last_state_change.py
|
tests.test_last_state_change.TestHostsvcLastStateChange
|
class TestHostsvcLastStateChange(AlignakTest):
def setUp(self):
super(TestHostsvcLastStateChange, self).setUp()
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
def test_host(self):
""" Test the last_state_change of host
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
self.scheduler_loop(1, [[host, 0, 'UP']])
time.sleep(0.2)
# Not yet a state change
assert host.last_state_change == 0
self.scheduler_loop(1, [[host, 0, 'UP']])
time.sleep(0.2)
assert host.last_state_change == 0
before = time.time()
time.sleep(0.1)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
time.sleep(0.1)
after = time.time()
# Integer values !
assert host.last_state_change == int(host.last_state_change)
assert host.last_state_change != 0
assert host.last_state_change >= int(before)
assert host.last_state_change <= int(after)
reference_time = host.last_state_change
self.scheduler_loop(1, [[host, 2, 'DOWN']])
time.sleep(1.1)
assert host.last_state_change == reference_time
time.sleep(1.0)
self.scheduler_loop(1, [[host, 0, 'UP']])
time.sleep(0.2)
assert host.last_state_change > reference_time
def test_host_unreachable(self):
""" Test last_state_change in unreachable mode (in host)
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.event_handler_enabled = False
host.notifications_enabled = False
host_router = self._scheduler.hosts.find_by_name("test_router_0")
host_router.checks_in_progress = []
host_router.event_handler_enabled = False
host_router.notifications_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [host_router, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.1)
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
# Not yet a state change
assert host.last_state_change == 0
before = time.time()
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
time.sleep(0.1)
time.sleep(0.1)
after = time.time()
assert "DOWN" == host_router.state
assert "SOFT" == host_router.state_type
assert "UP" == host_router.last_state
assert "HARD" == host_router.last_state_type
assert "UP" == host_router.last_hard_state
# Integer values !
assert host_router.last_state_change == int(host_router.last_state_change)
assert host_router.last_state_change != 0
assert host_router.last_state_change >= int(before)
assert host_router.last_state_change <= int(after)
# The host is still considered as UP
assert "UP" == host.state
assert "HARD" == host.state_type
reference_time = host_router.last_state_change
time.sleep(1.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
time.sleep(0.1)
assert "DOWN" == host_router.state
assert "SOFT" == host_router.state_type
assert "DOWN" == host_router.last_state
assert "SOFT" == host_router.last_state_type
assert "UP" == host_router.last_hard_state
# last_state_change not updated !
assert host_router.last_state_change == reference_time
# The host is still considered as UP
assert "UP" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
assert "DOWN" == host_router.last_state
assert "SOFT" == host_router.last_state_type
assert "DOWN" == host_router.last_hard_state
# The host is now unreachable
print("Host: %s" % host)
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
assert "UP" == host.last_state
assert "HARD" == host.last_state_type
assert "UP" == host.last_hard_state
time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# self.scheduler_loop(1, [[host, 2, 'DOWN'], [host_router, 2, 'DOWN']])
time.sleep(0.1)
after = time.time()
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
# The host remains unreachable
print("Host: %s" % host)
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
assert "UP" == host.last_state
assert "HARD" == host.last_state_type
assert "UP" == host.last_hard_state
# last_state_change not updated for UNREACHABLE state !
assert host.last_state_change == 0
self.scheduler_loop(1, [[host_router, 0, 'UP']])
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "DOWN" == host_router.last_state
assert "HARD" == host_router.last_state_type
assert "UP" == host_router.last_hard_state
assert "UP" == host.state
assert "HARD" == host.state_type
assert "UP" == host.last_state
assert "HARD" == host.last_state_type
assert "UP" == host.last_hard_state
def test_service(self):
""" Test the last_state_change of service
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
time.sleep(0.2)
# Not yet a state change
assert svc.last_state_change == 0
before = time.time()
time.sleep(0.1)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
time.sleep(0.1)
after = time.time()
# Integer values !
assert svc.last_state_change == int(svc.last_state_change)
assert svc.last_state_change != 0
assert svc.last_state_change >= int(before)
assert svc.last_state_change <= int(after)
reference_time = svc.last_state_change
time.sleep(1.1)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
assert svc.last_state_change == reference_time
time.sleep(1.0)
self.scheduler_loop(1, [[svc, 0, 'OK']])
time.sleep(0.2)
assert svc.last_state_change > reference_time
|
class TestHostsvcLastStateChange(AlignakTest):
def setUp(self):
pass
def test_host(self):
''' Test the last_state_change of host
:return: None
'''
pass
def test_host_unreachable(self):
''' Test last_state_change in unreachable mode (in host)
:return: None
'''
pass
def test_service(self):
''' Test the last_state_change of service
:return: None
'''
pass
| 5 | 3 | 50 | 9 | 36 | 7 | 1 | 0.18 | 1 | 2 | 0 | 0 | 4 | 0 | 4 | 59 | 203 | 37 | 144 | 20 | 139 | 26 | 142 | 20 | 137 | 1 | 2 | 0 | 4 |
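The tests above hinge on one rule: last_state_change is only touched on an actual state transition, and it is stored as an integer timestamp. A minimal standalone sketch of that rule (a toy item class, not Alignak code):

import time

class ToyItem(object):
    """Toy model of the last_state_change rule exercised by the tests above"""
    def __init__(self):
        self.state = 'UP'
        self.last_state_change = 0  # stays 0 until the first real transition

    def set_state(self, new_state):
        if new_state != self.state:
            self.state = new_state
            # Stored as an integer timestamp, hence the int() assertions above
            self.last_state_change = int(time.time())

item = ToyItem()
item.set_state('UP')    # no transition: last_state_change stays 0
assert item.last_state_change == 0
item.set_state('DOWN')  # real transition: the timestamp is recorded
assert item.last_state_change == int(item.last_state_change) != 0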
4,144 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_logging.py
|
tests.test_logging.TestLogging
|
class TestLogging(AlignakTest):
def setUp(self):
super(TestLogging, self).setUp()
# By default, get the alignak logger, set it up to INFO level and add the collector
self.logger = logging.getLogger(ALIGNAK_LOGGER_NAME)
# Default is 3 handlers are available
self.assertEqual(len(self.logger.handlers), 3)
# Specific for unit tests ... else the log collector is not enabled at this level
self.set_unit_tests_logger_level(logging.DEBUG)
def test_default_logger_values(self):
""" Test default logger values
:return:
"""
# Use a logger included in the default Alignak logger hierarchy
test_logger = logging.getLogger("alignak.test.name")
set_log_level(logging.WARNING)
assert test_logger.parent == self.logger_
test_logger.debug("Debug log")
test_logger.info("Info log")
test_logger.warning("Warning log")
test_logger.error("Error log")
test_logger.critical("Critical log")
self.show_logs()
self.assert_no_log_match(
re.escape(u"Debug log")
)
self.assert_no_log_match(
re.escape(u"Info log")
)
self.assert_any_log_match(
re.escape(u"Warning log")
)
self.assert_any_log_match(
re.escape(u"Error log")
)
self.assert_any_log_match(
re.escape(u"Critical log")
)
self.show_logs()
def test_change_level_and_get_msg(self):
""" Test change log level
:return:
"""
# Use the default unit tests logger
set_log_level(logging.DEBUG)
self.clear_logs()
self.logger_.debug("This message is emitted in DEBUG")
self.assert_any_log_match("This message is emitted in DEBUG")
set_log_level(logging.INFO)
self.clear_logs()
self.logger_.debug("This message will not be emitted")
self.assert_no_log_match("This message will not be emitted")
set_log_level(logging.WARNING)
self.clear_logs()
self.logger_.debug("This message will not be emitted")
self.assert_no_log_match("This message will not be emitted")
self.logger_.info("This message will not be emitted")
self.assert_no_log_match("This message will not be emitted")
def test_log_and_change_level(self):
""" Test change log level 2
:return:
"""
# Use the default unit tests logger
set_log_level(logging.INFO)
self.logger_.info("This message will be collected")
set_log_level(logging.WARNING)
self.logger_.info("This message won't be collected")
self.show_logs()
self.assert_any_log_match("This message will be collected")
self.assert_no_log_match("This message won't be collected")
def test_log_utf8(self):
""" Log as UTF8 format
:return:
"""
set_log_level(logging.INFO)
# Some special characters
# dollar, pound, currency, accented French
self.logger.info(u"I love myself $£¤ éàçèùè")
# A Russian text
self.logger.info(u"На берегу пустынных волн")
# A Chinese text
self.logger.info(u"新年快乐")
self.show_logs()
|
class TestLogging(AlignakTest):
def setUp(self):
pass
def test_default_logger_values(self):
''' Test default logger values
:return:
'''
pass
def test_change_level_and_get_msg(self):
''' Test change log level
:return:
'''
pass
def test_log_and_change_level(self):
''' Test change log level 2
:return:
'''
pass
def test_log_utf8(self):
''' Log as UTF8 format
:return:
'''
pass
| 6 | 4 | 20 | 3 | 12 | 4 | 1 | 0.36 | 1 | 1 | 0 | 0 | 5 | 1 | 5 | 60 | 105 | 22 | 61 | 8 | 55 | 22 | 51 | 8 | 45 | 1 | 2 | 0 | 5 |
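A minimal sketch of the pattern these tests rely on: any logger created below the Alignak logger hierarchy inherits its handlers and level, so a single set_log_level() call filters them all. Assumption: the helper names come from alignak.log, as the test code suggests.

import logging
from alignak.log import set_log_level, ALIGNAK_LOGGER_NAME  # assumed location

logger = logging.getLogger(ALIGNAK_LOGGER_NAME)              # Alignak root logger
child = logging.getLogger(ALIGNAK_LOGGER_NAME + ".test.name")
assert child.parent is logger      # the logging hierarchy links them automatically

set_log_level(logging.WARNING)
child.info("dropped")              # below WARNING: filtered out
child.warning("collected")         # WARNING and above: sent to the handlers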
4,145 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_logging.py
|
tests.test_logging.TestLogging2
|
class TestLogging2(AlignakTest):
def setUp(self):
print("No setup")
def tearDown(self):
print("No tear down")
def test_set_console_existing(self):
# Use the default unit tests logger
logger_configuration_file = os.path.join(os.getcwd(), './etc/alignak-logger.json')
print("Logger configuration file: %s" % logger_configuration_file)
self._set_console_log(logger_configuration_file)
def test_set_console(self):
# Use the unit tests logger that has no console output
logger_configuration_file = os.path.join(os.getcwd(),
'./etc/no_console_alignak-logger.json')
print("Logger configuration file: %s" % logger_configuration_file)
self._set_console_log(logger_configuration_file)
def _set_console_log(self, logger_configuration_file):
"""Set console logger for Alignak arbiter verify mode"""
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for hdlr in logger_.handlers:
if getattr(hdlr, 'filename', None):
print("- handler : %s - %s (%s) -> %s" % (hdlr.level, hdlr, hdlr.formatter._fmt,
hdlr.filename))
else:
print("- handler : %s - %s (%s)" % (hdlr.level, hdlr, hdlr.formatter._fmt))
print("--///--")
setup_logger(logger_configuration_file, log_dir=None, process_name='', log_file='')
self.logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
set_log_level(logging.INFO)
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for hdlr in logger_.handlers:
if getattr(hdlr, 'filename', None):
print("- handler : %s - %s (%s) -> %s" % (hdlr.level, hdlr, hdlr.formatter._fmt,
hdlr.filename))
else:
print("- handler : %s - %s (%s)" % (hdlr.level, hdlr, hdlr.formatter._fmt))
# Log message
self.logger_.info("Message")
self.show_logs()
set_log_console(logging.WARNING)
# Log message
self.logger_.info("Message")
self.show_logs()
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for hdlr in logger_.handlers:
if getattr(hdlr, 'filename', None):
print("- handler : %s - %s (%s) -> %s" % (hdlr.level, hdlr, hdlr.formatter._fmt,
hdlr.filename))
else:
print("- handler : %s - %s (%s)" % (hdlr.level, hdlr, hdlr.formatter._fmt))
|
class TestLogging2(AlignakTest):
def setUp(self):
pass
def tearDown(self):
pass
def test_set_console_existing(self):
pass
def test_set_console(self):
pass
def _set_console_log(self, logger_configuration_file):
'''Set console logger for Alignak arbiter verify mode'''
pass
| 6 | 1 | 11 | 1 | 9 | 1 | 2 | 0.11 | 1 | 0 | 0 | 0 | 5 | 1 | 5 | 60 | 62 | 12 | 45 | 11 | 39 | 5 | 38 | 11 | 32 | 7 | 2 | 2 | 11 |
4,146 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_logging.py
|
tests.test_logging.TestLogging3
|
class TestLogging3(AlignakTest):
def setUp(self):
super(TestLogging3, self).setUp()
# Clear logs and reset the logger
self.clear_logs()
# Remove all existing handlers (if some!)
# Former existing configuration
self.logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
self.logger_.handlers = []
def tearDown(self):
print("No tear down")
def test_log_format(self):
""" Log string format
:return:
"""
# Use the default unit tests logger
# Configure the logger with a daemon name
# Former existing configuration
self.logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
assert not self.logger_.handlers
logger_configuration_file = os.path.join(self._test_dir, './etc/alignak-logger.json')
setup_logger(logger_configuration_file, log_dir=None,
process_name='process_name', log_file='')
# Newly configured configuration
self.logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
print("Logger new handlers:")
for handler in self.logger_.handlers:
print("- handler %s: %s (%s)"
% (getattr(handler, '_name', None), handler, handler.formatter._fmt))
set_log_level(logging.INFO)
msg = "Message"
self.logger_.info(msg)
self.show_logs()
# The logger default format includes 'alignak_tests.'
# Now we get process_name in place of alignak_tests!
# [2020-01-26 09:48:38] INFO: [process_name.alignak] Message
self.assert_any_log_match(r'[\[0-9\]*] INFO: \[process_name.%s\] %s'
% (self.logger_.name, msg))
# Configure the logger with a daemon name
logger_configuration_file = os.path.join(self._test_dir, './etc/alignak-logger.json')
setup_logger(logger_configuration_file, log_dir=None,
process_name='process_name', log_file='')
self.logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
print("Logger configuration: ")
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for hdlr in logger_.handlers:
if getattr(hdlr, 'filename', None) and 'alignak_tests' in hdlr.filename:
print("- handler : %s (%s) -> %s" % (hdlr, hdlr.formatter._fmt, hdlr.filename))
else:
print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt))
set_log_level(logging.INFO)
msg2 = "Message 2"
self.logger_.info(msg2)
self.show_logs()
# The logger default format includes 'alignak_tests.'
# Now we get process_name in place of alignak_tests!
self.assert_any_log_match(r'[\[0-9\]*] INFO: \[process_name.%s\] %s'
% (self.logger_.name, msg))
self.assert_any_log_match(r'[\[0-9\]*] INFO: \[process_name.%s\] %s'
% (self.logger_.name, msg2))
|
class TestLogging3(AlignakTest):
def setUp(self):
pass
def tearDown(self):
pass
def test_log_format(self):
''' Log string format
:return:
'''
pass
| 4 | 1 | 23 | 3 | 14 | 5 | 2 | 0.36 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 58 | 72 | 12 | 44 | 11 | 40 | 16 | 37 | 11 | 33 | 4 | 2 | 2 | 6 |
4,147 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_macros_modulations.py
|
tests.test_macros_modulations.TestMacroModulations
|
class TestMacroModulations(AlignakTest):
def setUp(self):
super(TestMacroModulations, self).setUp()
self.setup_with_file('cfg/cfg_macros_modulation.cfg',
dispatching=True)
assert self.conf_is_correct
def test_macros_modulation(self):
""" Test macros modulation """
# Get the host
host = self._scheduler.hosts.find_by_name("modulated_host")
assert host is not None
assert host.macromodulations is not None
# Get its macros modulations
mod = self._scheduler.macromodulations.find_by_name("MODULATION")
assert mod is not None
assert mod.get_name() == "MODULATION"
assert mod.is_active(self._scheduler.timeperiods)
assert mod.uuid in host.macromodulations
mod2 = self._scheduler.macromodulations.find_by_name("MODULATION2")
assert mod2 is not None
assert mod2.get_name() == "MODULATION2"
assert mod2.is_active(self._scheduler.timeperiods)
assert mod2.uuid in host.macromodulations
# Get the host service
svc = self._scheduler.services.find_srv_by_name_and_hostname("modulated_host",
"modulated_service")
# Service is going CRITICAL/HARD ... this forces a host check!
assert len(host.checks_in_progress) == 0
self.scheduler_loop(1, [[svc, 2, 'BAD']])
self.show_checks()
assert len(host.checks_in_progress) == 1
for c in host.checks_in_progress:
print("Check: %s / %s" % (c, self._scheduler.checks[c]))
for c in host.checks_in_progress:
# The host has a custom macro defined as UNCHANGED
# The host has 2 attached modulations impacting this macro value.
# The first one with the value MODULATED and the second with NOT_THE_GOOD.
# Both are currently active, but we want to get the first one
assert 'plugins/nothing MODULATED' == self._scheduler.checks[c].command
|
class TestMacroModulations(AlignakTest):
def setUp(self):
pass
def test_macros_modulation(self):
''' Test macros modulation '''
pass
| 3 | 1 | 21 | 2 | 15 | 5 | 2 | 0.3 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 45 | 6 | 30 | 8 | 27 | 9 | 28 | 8 | 25 | 3 | 2 | 1 | 4 |
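The behaviour checked here, the first active modulation wins for a macro value, can be modelled standalone (a hypothetical helper, not the Alignak resolver):

def resolve_macro(name, host_macros, modulations):
    """Return the first active modulation value for the macro,
    falling back to the host's own macro value"""
    for mod in modulations:  # modulations keep their configuration order
        if mod['active'] and name in mod['macros']:
            return mod['macros'][name]
    return host_macros.get(name)

host_macros = {'_VALUE': 'UNCHANGED'}
modulations = [
    {'active': True, 'macros': {'_VALUE': 'MODULATED'}},
    {'active': True, 'macros': {'_VALUE': 'NOT_THE_GOOD'}},
]
assert resolve_macro('_VALUE', host_macros, modulations) == 'MODULATED'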
4,148 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/misc/custom_module.py
|
alignak.misc.custom_module.CustomModule
|
class CustomModule(ModuleType):
"""Custom module that can be used to customize a module namespace,
example usage:
>>> import sys
>>> assert __name__ == 'custom_module' # required for the import after
>>> class MyCustomModule(CustomModule):
... count = 0
... @property
... def an_attribute(self):
... self.count += 1
... return "hey ! I'm a module attribute but also a property !"
>>> sys.modules[__name__] = MyCustomModule(__name__, globals())
# then, in another module:
>>> import custom_module
>>> assert custom_module.count == 0
>>> custom_module.an_attribute
"hey ! I'm a module attribute but also a property !"
>>> assert custom_module.count == 1
"""
def __init__(self, name, orig_mod_globals):
super(CustomModule, self).__init__(name)
self.__dict__.update(**orig_mod_globals)
|
class CustomModule(ModuleType):
'''Custom module that can be used to customize a module namespace,
example usage:
>>> import sys
>>> assert __name__ == 'custom_module' # required for the import after
>>> class MyCustomModule(CustomModule):
... count = 0
... @property
... def an_attribute(self):
... self.count += 1
... return "hey ! I'm a module attribute but also a property !"
>>> sys.modules[__name__] = MyCustomModule(__name__, globals())
# then, in another module:
>>> import custom_module
>>> assert custom_module.count == 0
>>> custom_module.an_attribute
"hey ! I'm a module attribute but also a property !"
>>> assert custom_module.count == 1
'''
def __init__(self, name, orig_mod_globals):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 4.5 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 26 | 4 | 4 | 2 | 2 | 18 | 4 | 2 | 2 | 1 | 1 | 0 | 1 |
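Beyond the doctest above, the same trick works in any script: replace the module's entry in sys.modules with a CustomModule subclass, so module attributes can be computed properties. A minimal sketch:

import sys
from alignak.misc.custom_module import CustomModule

class Settings(CustomModule):
    counter = 0
    @property
    def hits(self):
        # recomputed on every attribute access, unlike a plain module global
        Settings.counter += 1
        return Settings.counter

sys.modules[__name__] = Settings(__name__, globals())
print(sys.modules[__name__].hits)  # -> 1
print(sys.modules[__name__].hits)  # -> 2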
4,149 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/misc/serialization.py
|
alignak.misc.serialization.AlignakClassLookupException
|
class AlignakClassLookupException(Exception):
"""Class for exceptions occurring in get_alignak_class from alignak.misc.serialization
"""
pass
|
class AlignakClassLookupException(Exception):
'''Class for exceptions occurring in get_alignak_class from alignak.misc.serialization
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 5 | 1 | 2 | 1 | 1 | 2 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
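A minimal sketch of how callers can guard deserialization with this exception, assuming the unserialize() helper lives in the same module, as the docstring's mention of get_alignak_class indicates:

from alignak.misc.serialization import unserialize, AlignakClassLookupException

def safe_unserialize(payload):
    try:
        return unserialize(payload)
    except AlignakClassLookupException as exp:
        # The payload referenced a class that is not a known Alignak type
        print("Dropped unknown object: %s" % exp)
        return None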
4,150 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/modules/inner_metrics.py
|
alignak.modules.inner_metrics.InnerMetrics
|
class InnerMetrics(BaseModule): # pylint: disable=too-many-instance-attributes
"""
This class is used to send the monitoring performance data (metrics) to Graphite, InfluxDB or a flat file
"""
def __init__(self, mod_conf): # pylint: disable=too-many-branches
"""Module initialization
mod_conf is a dictionary that contains:
- all the variables declared in the module configuration
- a 'properties' value that is the module properties as defined globally in this file
:param mod_conf: module configuration file as a dictionary
"""
BaseModule.__init__(self, mod_conf)
# pylint: disable=global-statement
global logger
logger = logging.getLogger('alignak.module.%s' % self.alias)
logger.setLevel(getattr(mod_conf, 'log_level', logging.INFO))
logger.debug("inner properties: %s", self.__dict__)
logger.debug("received configuration: %s", mod_conf.__dict__)
logger.info("loaded by the %s '%s'", self.my_daemon.type, self.my_daemon.name)
# Output file target
self.output_file = getattr(mod_conf, 'output_file', '')
if 'ALIGNAK_HOSTS_STATS_FILE' in os.environ:
self.output_file = os.environ['ALIGNAK_HOSTS_STATS_FILE']
# Graphite / InfluxDB targets
self.graphite_enabled = (getattr(mod_conf, 'graphite_enabled', '0') != '0')
if isinstance(getattr(mod_conf, 'graphite_enabled', '0'), bool):
self.graphite_enabled = getattr(mod_conf, 'graphite_enabled')
self.influxdb_enabled = (getattr(mod_conf, 'influxdb_enabled', '0') != '0')
if isinstance(getattr(mod_conf, 'influxdb_enabled', '0'), bool):
self.influxdb_enabled = getattr(mod_conf, 'influxdb_enabled')
if self.influxdb_enabled and not influxdb_lib:
logger.info("Sending metrics to InfluxDB is enabled but the influxdb Python "
"library is not installed. You should 'pip install influxdb'! "
"As of now, sending to influxdb is disabled.")
self.influxdb_enabled = False
logger.info("targets configuration: graphite: %s, influxdb: %s, file: %s",
self.graphite_enabled, self.influxdb_enabled, self.output_file)
if self.output_file:
logger.warning("Storing metrics in an output file is configured. Do not forget "
"to regularly clean this file to avoid important disk usage!")
self.enabled = getattr(mod_conf, 'enabled', '0') != '0'
if isinstance(getattr(mod_conf, 'enabled', '0'), bool):
self.enabled = getattr(mod_conf, 'enabled')
if not self.output_file and not self.graphite_enabled and not self.influxdb_enabled:
logger.warning("The metrics sending module is enabled but no target is defined. You "
"should set one of the 'output_file', or 'graphite_enabled' or "
"'influxdb_enabled' parameter to specify where the metrics "
"must be pushed! As of now, the module is disabled.")
self.enabled = False
# Hosts and services internal cache
# - contain the hosts and services names and specific parameters
# - updated with the initial hosts/services status broks
self.hosts_cache = {}
self.services_cache = {}
# Whether to manage check results for hosts/services that are not (yet) known
# in the internal cache. If not set, the module ignores the received broks
# until the initial status broks are received and the cache is filled
self.ignore_unknown = getattr(mod_conf, 'ignore_unknown', '1') == '1'
if isinstance(getattr(mod_conf, 'ignore_unknown', '0'), bool):
self.ignore_unknown = getattr(mod_conf, 'ignore_unknown')
logger.info("ignoring unknown: %s", self.ignore_unknown)
# Separate performance data multiple values
self.multiple_values = re.compile(r'_(\d+)$')
# Internal metrics cache
self.my_metrics = []
self.metrics_flush_count = int(getattr(mod_conf, 'metrics_flush_count', '256'))
self.last_failure = 0
self.metrics_flush_pause = int(os.getenv('ALIGNAK_STATS_FLUSH_PAUSE', '10'))
self.log_metrics_flush_pause = False
# Specific filter for host and services names for Graphite
self.illegal_char_hostname = re.compile(r'[^a-zA-Z0-9_\-]')
# Graphite target
self.graphite_host = getattr(mod_conf, 'graphite_host', 'localhost')
self.graphite_port = int(getattr(mod_conf, 'graphite_port', '2004'))
self.carbon = None
logger.info("graphite host/port: %s:%d", self.graphite_host, self.graphite_port)
# optional prefix / suffix in graphite for Alignak data source
self.graphite_data_source = \
sanitize_name(getattr(mod_conf, 'graphite_data_source', ''))
self.graphite_prefix = getattr(mod_conf, 'graphite_prefix', '')
self.realms_prefix = (getattr(mod_conf, 'realms_prefix', '0') != '0')
if isinstance(getattr(mod_conf, 'realms_prefix', '0'), bool):
self.realms_prefix = getattr(mod_conf, 'realms_prefix')
logger.info("graphite prefix: %s, realm prefix: %s, data source: %s",
self.graphite_prefix, self.realms_prefix, self.graphite_data_source)
if self.graphite_enabled and not self.graphite_host:
logger.warning("Graphite host name is not set, no metrics will be sent to Graphite!")
self.graphite_enabled = False
# InfluxDB target
self.influxdb_host = getattr(mod_conf, 'influxdb_host', 'localhost')
self.influxdb_port = int(getattr(mod_conf, 'influxdb_port', '8086'))
self.influxdb_database = getattr(mod_conf, 'influxdb_database', 'alignak')
# Default is empty - do not used authenticated connection
self.influxdb_username = getattr(mod_conf, 'influxdb_username', '')
self.influxdb_password = getattr(mod_conf, 'influxdb_password', '')
# Default is empty - do not use a specific retention
self.influxdb_retention_name = \
getattr(mod_conf, 'influxdb_retention_name', '')
self.influxdb_retention_duration = \
getattr(mod_conf, 'influxdb_retention_duration', 'INF')
self.influxdb_retention_replication = \
getattr(mod_conf, 'influxdb_retention_replication', '1')
self.influx = None
logger.info("influxdb host/port: %s:%d", self.influxdb_host, self.influxdb_port)
logger.info("influxdb database: %s, retention: %s:%s:%s",
self.influxdb_database, self.influxdb_retention_name,
self.influxdb_retention_duration, self.influxdb_retention_replication)
# optional tags list in influxdb for Alignak data source
self.influxdb_tags = getattr(mod_conf, 'influxdb_tags', None)
if self.influxdb_tags:
tags_list = {}
tags = self.influxdb_tags.split(',')
for tag in tags:
if '=' in tag:
tag = tag.split('=')
tags_list[tag[0]] = tag[1]
if tags_list:
self.influxdb_tags = tags_list
logger.info("influxdb tags: %s", self.influxdb_tags)
if self.influxdb_enabled and not self.influxdb_host:
logger.warning("InfluxDB host name is not set, no metrics will be sent to InfluxDB!")
self.influxdb_enabled = False
# Used to reset the check time to the scheduled time.
# Carbon/Graphite does not like latency in the data and creates blanks in graphs
# Any data with a "small" latency will be considered created at the scheduled time
self.ignore_latency_limit = int(getattr(mod_conf, 'ignore_latency_limit', '0'))
if self.ignore_latency_limit < 0:
self.ignore_latency_limit = 0
# service name to use for host check
self.hostcheck = sanitize_name(getattr(mod_conf, 'host_check', 'hostcheck'))
# Send warning, critical, min, max
self.send_warning = bool(getattr(mod_conf, 'send_warning', False))
logger.info("send warning metrics: %d", self.send_warning)
self.send_critical = bool(getattr(mod_conf, 'send_critical', False))
logger.info("send critical metrics: %d", self.send_critical)
self.send_min = bool(getattr(mod_conf, 'send_min', False))
logger.info("send min metrics: %d", self.send_min)
self.send_max = bool(getattr(mod_conf, 'send_max', False))
logger.info("send max metrics: %d", self.send_max)
if not self.enabled:
logger.warning("inner metrics module is loaded but is not enabled.")
return
logger.info("metrics module is loaded and enabled")
def init(self): # pylint: disable=too-many-branches
"""Called by the daemon broker to initialize the module"""
if not self.enabled:
logger.info(" the module is disabled.")
return True
connections = False
try:
connections = self.test_connection()
except Exception as exp: # pylint: disable=broad-except
logger.error("initialization, test connection failed. Error: %s", str(exp))
if self.influxdb_enabled:
try:
# Check that configured TSDB is existing, else creates...
dbs = self.influx.get_list_database()
for db in dbs:
if db.get('name') == self.influxdb_database:
logger.info("the database %s is existing.", self.influxdb_database)
break
else:
# Create the database
logger.info("creating database %s...", self.influxdb_database)
self.influx.create_database(self.influxdb_database)
# Check that configured TSDB retention is existing, else creates...
if self.influxdb_retention_name:
rps = self.influx.get_list_retention_policies()
for rp in rps:
if rp.get('name') == self.influxdb_retention_name:
logger.info("the retention policy %s is existing.",
self.influxdb_retention_name)
break
else:
# Create a retention policy for this database
logger.info("creating database retention policy: %s - %s - %s...",
self.influxdb_retention_name, self.influxdb_retention_duration,
self.influxdb_retention_replication)
self.influx.create_retention_policy(
self.influxdb_retention_name, self.influxdb_retention_duration,
self.influxdb_retention_replication, database=self.influxdb_database)
# Check that configured TSDB user is existing, else creates...
if self.influxdb_username:
users = self.influx.get_list_users()
for user in users:
if user.get('user') == self.influxdb_username:
logger.info("the user %s is existing.",
self.influxdb_username)
break
else:
# Create a retention policy for this database
logger.info("creating user: %s...", self.influxdb_username)
self.influx.create_user(self.influxdb_username, self.influxdb_password,
admin=False)
connections = connections or True
except Exception as exp: # pylint: disable=broad-except
logger.error("InfluxDB, DB initialization failed. Error: %s", str(exp))
return connections
def test_connection(self):
"""Called to test the connection
Returns True if all configured targets are ok
:return: bool
"""
if not self.enabled:
return False
connections = False
if self.output_file:
logger.info("testing storage to %s ...", self.output_file)
try:
with open(self.output_file, 'a') as fp:
fp.write("%s;%s;%s\n" % (int(time.time()), 'connection-test', int(time.time())))
except Exception as exp: # pylint: disable=broad-except
logger.error("File output test, error: %s", str(exp))
else:
connections = connections or True
logger.info("Ok")
if self.influxdb_enabled:
logger.info("testing connection to InfluxDB %s:%d ...",
self.influxdb_host, self.influxdb_port)
if not self.influx:
self.influx = InfluxDBClient(host=self.influxdb_host, port=self.influxdb_port,
database=self.influxdb_database,
username=self.influxdb_username,
password=self.influxdb_password)
try:
# Check that connection is correct
version = self.influx.ping()
logger.info("connected, InfluxDB version %s", version)
except Exception as exp: # pylint: disable=broad-except
logger.error("InfluxDB test, error: %s", str(exp))
else:
connections = connections or True
if self.graphite_enabled:
logger.info("testing connection to Graphite %s:%d ...",
self.graphite_host, self.graphite_port)
if not self.carbon:
self.carbon = CarbonIface(self.graphite_host, self.graphite_port)
carbon_data = [
('.'.join([self.graphite_prefix, 'connection-test']),
('connection-test', int(time.time())))
]
self.carbon.add_data_list(carbon_data)
if self.carbon.send_data():
connections = connections or True
logger.info("Ok")
else:
logger.error("Ko")
return connections
# Common functions
def do_loop_turn(self):
"""This function is called/used when you need a module with
a loop function (and use the parameter 'external': True)
"""
logger.info("In loop...")
time.sleep(1)
def get_metrics_from_perfdata(self, service, perf_data):
"""Decode the performance data to build a metrics list"""
result = []
metrics = PerfDatas(perf_data)
for metric in metrics:
logger.debug("service: %s, metric: %s (%s)", service, metric, metric.__dict__)
if metric.name in ['time']:
metric.name = "duration"
name = sanitize_name(metric.name)
name = self.multiple_values.sub(r'.\1', name)
if not name:
continue
# get the metric value and its threshold values if they exist
name_value = {
name: metric.value,
'uom_' + name: metric.uom
}
# Get or ignore extra values depending upon module configuration
if metric.warning and self.send_warning:
name_value[name + '_warn'] = metric.warning
if metric.critical and self.send_critical:
name_value[name + '_crit'] = metric.critical
if metric.min and self.send_min:
name_value[name + '_min'] = metric.min
if metric.max and self.send_max:
name_value[name + '_max'] = metric.max
for key, value in name_value.items():
result.append((key, value, metric.uom))
logger.debug("Metrics: %s - %s", service, result)
return result
@property
def metrics_count(self):
"""
Number of internal stored metrics
:return:
"""
return len(self.my_metrics)
def flush(self, log=False): # pylint:disable=too-many-branches, too-many-nested-blocks
"""Send inner stored metrics to the configured Graphite or InfluxDB
Returns False if the sending failed; a warning is logged if the log parameter is set
:param log: to log information or not
:type log: bool
:return: bool
"""
if not self.my_metrics:
logger.debug("Flushing - no metrics to send")
return True
now = int(time.time())
if self.last_failure and self.last_failure + self.metrics_flush_pause > now:
if not self.log_metrics_flush_pause:
logger.warning("Flush paused on connection error (last failed: %d). "
"Inner stored metric: %d. Trying to send...",
self.last_failure, self.metrics_count)
self.log_metrics_flush_pause = True
if not self.test_connection():
return False
metrics_sent = False
metrics_saved = False
# Flushing to Graphite
if self.graphite_enabled:
try:
logger.debug("Flushing %d metrics to Graphite/carbon", self.metrics_count)
carbon_data = []
for metric in self.my_metrics:
# Get path
path = metric['tags']['path']
for name, value in metric['fields'].items():
carbon_data.append(
('.'.join([self.graphite_prefix, '.'.join([path, name])]),
(metric['time'], value)))
self.carbon.add_data_list(carbon_data)
if self.carbon.send_data():
metrics_sent = True
else:
if log:
logger.warning("Failed sending metrics to Graphite/carbon. "
"Inner stored metric: %d", self.metrics_count)
if self.log_metrics_flush_pause:
logger.warning("Metrics flush restored. "
"Remaining stored metric: %d", self.metrics_count)
self.last_failure = 0
self.log_metrics_flush_pause = False
except Exception as exp: # pylint: disable=broad-except
if not self.log_metrics_flush_pause:
logger.warning("Failed sending metrics to Graphite/carbon: %s:%d. "
"Inner stored metrics count: %d.",
self.graphite_host, self.graphite_port, self.metrics_count)
logger.warning("Exception: %s / %s", str(exp), traceback.print_exc())
else:
logger.warning("Flush paused on connection error (last failed: %d). "
"Inner stored metric: %d. Trying to send...",
self.last_failure, self.metrics_count)
self.last_failure = now
return False
# Flushing to InfluxDB
# pylint: disable=too-many-nested-blocks
if self.influxdb_enabled:
try:
logger.debug("Flushing %d metrics to InfluxDB", self.metrics_count)
for metric in self.my_metrics:
metric['time'] *= 1000000000
for name, value in metric['fields'].items():
if name.startswith('uom_'):
continue
# Force set float values
if not isinstance(value, float):
try:
value = float(value)
except Exception: # pylint: disable=broad-except
pass
metric['fields'][name] = value
if self.influxdb_tags is not None and isinstance(self.influxdb_tags, dict):
metric['tags'].update(self.influxdb_tags)
# Write data to InfluxDB
metrics_sent = self.influx.write_points(self.my_metrics)
if self.log_metrics_flush_pause:
logger.warning("Metrics flush restored. "
"Remaining stored metric: %d", self.metrics_count)
self.last_failure = 0
self.log_metrics_flush_pause = False
except Exception as exp: # pylint: disable=broad-except
logger.warning("*** Exception: %s", str(exp))
if not self.log_metrics_flush_pause:
logger.warning("Failed sending metrics to InfluxDB: %s:%d. "
"Inner stored metrics count: %d.",
self.influxdb_host, self.influxdb_port, self.metrics_count)
logger.warning("Exception: %s", str(exp))
else:
logger.warning("Flush paused on connection error (last failed: %d). "
"Inner stored metric: %d. Trying to send...",
self.last_failure, self.metrics_count)
self.last_failure = now
return False
if self.output_file:
try:
logger.debug("Storing %d metrics to %s", self.metrics_count, self.output_file)
with open(self.output_file, 'a') as fp:
for metric in self.my_metrics:
# Get path
path = metric['tags']['path']
for name, value in metric['fields'].items():
fp.write("%s;%s;%s\n" % (metric['time'], '.'.join((path, name)), value))
metrics_saved = True
except Exception as exp: # pylint: disable=broad-except
logger.warning("Failed writing to a file: %s. "
"Inner stored metrics count: %d\n Exception: %s",
self.output_file, self.metrics_count, str(exp))
return False
if ((self.graphite_host or self.influxdb_host) and metrics_sent) or \
(self.output_file and metrics_saved):
self.my_metrics = []
return True
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
"""Send performance data to time series database
This function stores the metrics in the internal cache, checks whether a flush
is necessary and, if so, flushes.
:param realm: concerned realm
:type: string
:param host: concerned host
:type: string
:param service: concerned service
:type: string
:param metrics: list of metric tuples (name, value, uom)
:type: list
:param ts: timestamp
:type: int
:param path: full path (eg. Graphite) for the received metrics
:type: string
"""
if ts is None:
ts = int(time.time())
data = {
"measurement": service,
"tags": {
"host": host,
"service": service,
"realm": '.'.join(realm) if isinstance(realm, list) else realm,
"path": path
},
"time": ts,
"fields": {}
}
if path is not None:
data['tags'].update({"path": path})
for metric, value, _ in metrics:
data['fields'].update({metric: value})
# Flush if necessary
logger.debug("Metrics data: %s", data)
self.my_metrics.append(data)
if self.metrics_count >= self.metrics_flush_count:
# self.carbon.add_data_list(self.my_metrics)
self.flush()
def manage_initial_service_status_brok(self, b):
"""Prepare the known services cache"""
host_name = b.data['host_name']
service_description = b.data['service_description']
service_id = host_name+"/"+service_description
logger.debug("got initial service status: %s", service_id)
if host_name not in self.hosts_cache:
logger.error("initial service status, host is unknown: %s.", service_id)
return
self.services_cache[service_id] = {}
if 'customs' in b.data:
self.services_cache[service_id]['_GRAPHITE_POST'] = \
sanitize_name(b.data['customs'].get('_GRAPHITE_POST', None))
logger.debug("initial service status received: %s", service_id)
def manage_initial_host_status_brok(self, b):
"""Prepare the known hosts cache"""
host_name = b.data['host_name']
logger.debug("got initial host status: %s", host_name)
self.hosts_cache[host_name] = {
'realm_name':
sanitize_name(b.data.get('realm_name', b.data.get('realm', 'All'))),
}
if 'customs' in b.data:
self.hosts_cache[host_name]['_GRAPHITE_PRE'] = \
sanitize_name(b.data['customs'].get('_GRAPHITE_PRE', None))
self.hosts_cache[host_name]['_GRAPHITE_GROUP'] = \
sanitize_name(b.data['customs'].get('_GRAPHITE_GROUP', None))
logger.debug("initial host status received: %s", host_name)
def manage_service_check_result_brok(self, b): # pylint: disable=too-many-branches
"""A service check result brok has just arrived ..."""
host_name = b.data.get('host_name', None)
service_description = b.data.get('service_description', None)
if not host_name or not service_description:
return
service_id = host_name+"/"+service_description
logger.debug("service check result: %s", service_id)
# If host and service initial status broks have not been received, ignore ...
if not self.ignore_unknown and host_name not in self.hosts_cache:
logger.warning("received service check result for an unknown host: %s", service_id)
return
if service_id not in self.services_cache and not self.ignore_unknown:
logger.warning("received service check result for an unknown service: %s", service_id)
return
# Decode received metrics
metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
if not metrics:
logger.debug("no metrics to send ...")
return
# If checks latency is ignored
if self.ignore_latency_limit >= b.data['latency'] > 0:
check_time = int(b.data['last_chk']) - int(b.data['latency'])
else:
check_time = int(b.data['last_chk'])
# Custom hosts variables
hname = sanitize_name(host_name)
if host_name in self.hosts_cache:
if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
# Custom services variables
desc = sanitize_name(service_description)
if service_id in self.services_cache:
if self.services_cache[service_id].get('_GRAPHITE_POST', None):
desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None)))
# Graphite data source
if self.graphite_data_source:
path = '.'.join((hname, self.graphite_data_source, desc))
else:
path = '.'.join((hname, desc))
# Realm as a prefix
if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
realm_name = None
if host_name in self.hosts_cache:
realm_name = self.hosts_cache[host_name].get('realm_name', None)
# Send metrics
self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path)
def manage_host_check_result_brok(self, b): # pylint: disable=too-many-branches
"""An host check result brok has just arrived..."""
host_name = b.data.get('host_name', None)
if not host_name:
return
logger.debug("host check result: %s", host_name)
# If host initial status brok has not been received, ignore ...
if host_name not in self.hosts_cache and not self.ignore_unknown:
logger.warning("received host check result for an unknown host: %s", host_name)
return
# Decode received metrics
metrics = self.get_metrics_from_perfdata('host_check', b.data['perf_data'])
if not metrics:
logger.debug("no metrics to send ...")
return
# If checks latency is ignored
if self.ignore_latency_limit >= b.data['latency'] > 0:
check_time = int(b.data['last_chk']) - int(b.data['latency'])
else:
check_time = int(b.data['last_chk'])
# Custom hosts variables
hname = sanitize_name(host_name)
if host_name in self.hosts_cache:
if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
# Graphite data source
if self.graphite_data_source:
path = '.'.join((hname, self.graphite_data_source))
if self.hostcheck:
path = '.'.join((hname, self.graphite_data_source, self.hostcheck))
else:
path = '.'.join((hname, self.hostcheck))
# Realm as a prefix
if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
realm_name = None
if host_name in self.hosts_cache:
realm_name = self.hosts_cache[host_name].get('realm_name', None)
# Send metrics
self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)
|
class InnerMetrics(BaseModule):
'''
This class is used to send the monitoring performance data (metrics) to Graphite, InfluxDB or a flat file
'''
def __init__(self, mod_conf):
'''Module initialization
mod_conf is a dictionary that contains:
- all the variables declared in the module configuration
- a 'properties' value that is the module properties as defined globally in this file
:param mod_conf: module configuration file as a dictionary
'''
pass
def init(self):
'''Called by the daemon broker to initialize the module'''
pass
def test_connection(self):
'''Called to test the connection
Returns True if all configured targets are ok
:return: bool
'''
pass
def do_loop_turn(self):
'''This function is called/used when you need a module with
a loop function (and use the parameter 'external': True)
'''
pass
def get_metrics_from_perfdata(self, service, perf_data):
'''Decode the performance data to build a metrics list'''
pass
@property
def metrics_count(self):
'''
Number of internal stored metrics
:return:
'''
pass
def flush(self, log=False):
'''Send inner stored metrics to the configured Graphite or InfluxDB
Returns False if the sending failed; a warning is logged if the log parameter is set
:param log: to log information or not
:type log: bool
:return: bool
'''
pass
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
'''Send performance data to time series database
This function stores the metrics in the internal cache, checks whether a flush
is necessary and, if so, flushes.
:param realm: concerned realm
:type: string
:param host: concerned host
:type: string
:param service: concerned service
:type: string
:param metrics: list of metric tuples (name, value, uom)
:type: list
:param ts: timestamp
:type: int
:param path: full path (eg. Graphite) for the received metrics
:type: string
'''
pass
def manage_initial_service_status_brok(self, b):
'''Prepare the known services cache'''
pass
def manage_initial_host_status_brok(self, b):
'''Prepare the known hosts cache'''
pass
def manage_service_check_result_brok(self, b):
'''A service check result brok has just arrived ...'''
pass
def manage_host_check_result_brok(self, b):
'''A host check result brok has just arrived...'''
pass
| 14 | 13 | 55 | 8 | 38 | 10 | 9 | 0.26 | 1 | 9 | 2 | 0 | 12 | 36 | 12 | 32 | 676 | 108 | 463 | 103 | 448 | 119 | 387 | 97 | 373 | 28 | 2 | 6 | 113 |
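The Graphite path construction used in the two manage_*_check_result_brok methods above reduces to a small pure function; a standalone sketch mirroring it (hypothetical helper, name sanitization left out):

def build_path(host, service, pre=None, group=None, post=None,
               data_source=None, realm=None, realms_prefix=False):
    """Mirror of the metric path construction for a service check result"""
    hname = host
    if group:                    # _GRAPHITE_GROUP host custom variable
        hname = ".".join((group, hname))
    if pre:                      # _GRAPHITE_PRE host custom variable
        hname = ".".join((pre, hname))
    desc = service
    if post:                     # _GRAPHITE_POST service custom variable
        desc = ".".join((desc, post))
    parts = [hname] + ([data_source] if data_source else []) + [desc]
    path = ".".join(parts)
    if realms_prefix and realm:  # optional realm name prefix
        path = ".".join((realm, path))
    return path

assert build_path("srv1", "cpu", group="linux", data_source="alignak",
                  realm="All", realms_prefix=True) == "All.linux.srv1.alignak.cpu"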
4,151 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/misc/perfdata.py
|
alignak.misc.perfdata.Metric
|
class Metric(object):
# pylint: disable=too-few-public-methods
"""
Class providing a small abstraction for one metric of a Perfdatas class
"""
def __init__(self, string):
self.name = self.value = self.uom = \
self.warning = self.critical = self.min = self.max = None
string = string.strip()
matches = METRIC_PATTERN.match(string)
if matches:
# Get the name but remove all ' in it
self.name = matches.group(1).replace("'", "")
self.value = guess_int_or_float(matches.group(2))
self.uom = matches.group(3)
self.warning = guess_int_or_float(matches.group(4))
self.critical = guess_int_or_float(matches.group(5))
self.min = guess_int_or_float(matches.group(6))
self.max = guess_int_or_float(matches.group(7))
if self.uom == '%':
self.min = 0
self.max = 100
def __str__(self): # pragma: no cover
string = "%s=%s%s" % (self.name, self.value, self.uom)
if self.warning:
string += ";%s" % (self.warning)
if self.critical:
string += ";%s" % (self.critical)
return string
|
class Metric(object):
'''
Class providing a small abstraction for one metric of a Perfdatas class
'''
def __init__(self, string):
pass
def __str__(self):
pass
| 3 | 1 | 12 | 0 | 12 | 1 | 3 | 0.25 | 1 | 0 | 0 | 0 | 2 | 7 | 2 | 2 | 30 | 1 | 24 | 7 | 21 | 6 | 23 | 6 | 20 | 3 | 1 | 2 | 6 |
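A short usage sketch for Metric, assuming the usual Nagios plugin perfdata syntax label=value[uom];warn;crit;min;max that METRIC_PATTERN parses:

from alignak.misc.perfdata import Metric

metric = Metric("rta=0.5ms;1000;3000;0")
assert (metric.name, metric.value, metric.uom) == ("rta", 0.5, "ms")
assert (metric.warning, metric.critical, metric.min) == (1000, 3000, 0)

percent = Metric("pl=20%")   # percent unit: min/max are forced to 0/100
assert (percent.min, percent.max) == (0, 100)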
4,152 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/misc/perfdata.py
|
alignak.misc.perfdata.PerfDatas
|
class PerfDatas(object):
# pylint: disable=too-few-public-methods
"""
Class providing performance data extracted from a check output
"""
def __init__(self, string):
string = string or ''
elts = PERFDATA_SPLIT_PATTERN.findall(string)
elts = [e for e in elts if e != '']
self.metrics = {}
for elem in elts:
metric = Metric(elem)
if metric.name is not None:
self.metrics[metric.name] = metric
def __iter__(self):
return iter(list(self.metrics.values()))
def __len__(self):
return len(self.metrics)
def __getitem__(self, key):
return self.metrics[key]
def __contains__(self, key):
return key in self.metrics
|
class PerfDatas(object):
'''
Class providing performance data extracted from a check output
'''
def __init__(self, string):
pass
def __iter__(self):
pass
def __len__(self):
pass
def __getitem__(self, key):
pass
def __contains__(self, key):
pass
| 6 | 1 | 3 | 0 | 3 | 0 | 1 | 0.22 | 1 | 2 | 1 | 0 | 5 | 1 | 5 | 5 | 26 | 4 | 18 | 10 | 12 | 4 | 18 | 10 | 12 | 3 | 1 | 2 | 7 |
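And the container view, a minimal sketch iterating a whole perfdata string:

from alignak.misc.perfdata import PerfDatas

perf = PerfDatas("rta=0.5ms;1000;3000;0 pl=20%;50;80")
assert len(perf) == 2
assert "rta" in perf and perf["pl"].critical == 80
for metric in perf:          # iterates over the parsed Metric objects
    print(metric.name, metric.value, metric.uom)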
4,153 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/misc/carboniface.py
|
alignak.misc.carboniface.CarbonIface
|
class CarbonIface(object):
def __init__(self, host, port, event_url=None):
"""Initialize Carbon Interface.
host: host where the carbon daemon is running
port: port where carbon daemon is listening for pickle protocol on host
event_url: web app url where events can be added. It must be provided if add_event(...)
is to be used, otherwise urllib will raise an exception
"""
self.host = host
self.port = port
self.event_url = event_url
self.__data = []
self.__data_lock = threading.Lock()
def add_data(self, metric, value, ts=None):
"""
Add data to queue
:param metric: the metric name
:type metric: str
:param value: the value of data
:type value: int
:param ts: the timestamp
:type ts: int | None
:return: True if added successfully, otherwise False
:rtype: bool
"""
if not ts:
ts = time.time()
if self.__data_lock.acquire():
self.__data.append((metric, (ts, value)))
self.__data_lock.release()
return True
return False
def add_data_dict(self, dd): # pragma: no cover - never used...
"""
dd must be a dictionary where keys are the metric name,
each key contains a dictionary which at least must have 'value' key (optionally 'ts')
dd = {'experiment1.subsystem.block.metric1': {'value': 12.3, 'ts': 1379491605.55},
'experiment1.subsystem.block.metric2': {'value': 1.35},
...}
"""
if self.__data_lock.acquire():
for k, v in list(dd.items()):
ts = v.get('ts', time.time())
value = v.get('value')
self.__data.append((k, (ts, value)))
self.__data_lock.release()
return True
return False
def add_data_list(self, dl): # pragma: no cover - never used...
"""
dl must be a list of tuples like:
dl = [('metricname', (timestamp, value)),
('metricname', (timestamp, value)),
...]
"""
if self.__data_lock.acquire():
self.__data.extend(dl)
self.__data_lock.release()
return True
return False
def send_data(self, data=None):
"""If data is empty, current buffer is sent. Otherwise data must be like:
data = [('metricname', (timestamp, value)),
('metricname', (timestamp, value)),
...]
"""
save_in_error = False
if not data:
if self.__data_lock.acquire():
data = self.__data
self.__data = []
save_in_error = True
self.__data_lock.release()
else:
return False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
payload = pickle.dumps(data, protocol=2)
header = struct.pack("!L", len(payload))
message = header + payload
s.settimeout(1)
s.connect((self.host, self.port))
try:
s.send(message)
except Exception:
# log.exception('Error when sending data to carbon')
if save_in_error:
self.__data.extend(data)
return False
else:
# log.debug('Sent data to {host}:{port}: {0} metrics, {1} bytes'.format(len(data),
# len(message), host = self.host, port=self.port))
return True
finally:
s.close()
def add_event(self, what, data=None, tags=None, when=None): # pragma: no cover - never used...
"""
:param what:
:param data:
:param tags:
:param when:
:return:
"""
if not when:
when = time.time()
postdata = '{{"what":"{0}", "when":{1}'.format(what, when)
if data:
postdata += ', "data":"{0}"'.format(str(data))
if tags:
postdata += ', "tags": "{0}"'.format(str(tags))
postdata += '}'
# Note: __init__ stores the target url in self.event_url; the POST body is
# passed to Request directly (add_data was the legacy urllib2 API)
req = Request(self.event_url, data=postdata.encode('utf-8'))
try:
urlopen(req)
except Exception as _:
# log.exception('Error when sending event to carbon')
pass
|
class CarbonIface(object):
def __init__(self, host, port, event_url=None):
'''Initialize Carbon Interface.
host: host where the carbon daemon is running
port: port where carbon daemon is listening for pickle protocol on host
event_url: web app url where events can be added. It must be provided if add_event(...)
is to be used, otherwise urllib will raise an exception
'''
pass
def add_data(self, metric, value, ts=None):
'''
Add data to queue
:param metric: the metric name
:type metric: str
:param value: the value of data
:type value: int
:param ts: the timestamp
:type ts: int | None
:return: True if added successfully, otherwise False
:rtype: bool
'''
pass
def add_data_dict(self, dd):
'''
dd must be a dictionary where keys are the metric name,
each key contains a dictionary which at least must have 'value' key (optionally 'ts')
dd = {'experiment1.subsystem.block.metric1': {'value': 12.3, 'ts': 1379491605.55},
'experiment1.subsystem.block.metric2': {'value': 1.35},
...}
'''
pass
def add_data_list(self, dl):
'''
dl must be a list of tuples like:
dl = [('metricname', (timestamp, value)),
('metricname', (timestamp, value)),
...]
'''
pass
def send_data(self, data=None):
'''If data is empty, current buffer is sent. Otherwise data must be like:
data = [('metricname', (timestamp, value)),
('metricname', (timestamp, value)),
...]
'''
pass
def add_event(self, what, data=None, tags=None, when=None):
'''
:param what:
:param data:
:param tags:
:param when:
:return:
'''
pass
| 7 | 6 | 20 | 1 | 12 | 8 | 3 | 0.69 | 1 | 5 | 0 | 0 | 6 | 5 | 6 | 6 | 128 | 11 | 71 | 23 | 64 | 49 | 69 | 22 | 62 | 5 | 1 | 2 | 19 |
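A minimal usage sketch for CarbonIface, against a carbon daemon listening for the pickle protocol (port 2004 by default):

import time
from alignak.misc.carboniface import CarbonIface

carbon = CarbonIface("localhost", 2004)
carbon.add_data("alignak.test.metric", 42, ts=int(time.time()))
try:
    sent = carbon.send_data()   # flushes the internal buffer in one batch
except OSError:
    # connect() errors are raised to the caller; only send() errors are
    # swallowed (and the batch re-buffered) inside send_data()
    sent = False
print("sent" if sent else "not sent")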
4,154 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/notification.py
|
alignak.notification.Notification
|
class Notification(Action): # pylint: disable=too-many-instance-attributes
"""Notification class, inherits from action class. Used to notify contacts
and execute notification command defined in configuration
"""
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
my_type = 'notification'
properties = Action.properties.copy()
properties.update({
'is_a':
StringProp(default=u'notification'),
'start_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'end_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'contact_name':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'host_name':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'service_description':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'reason_type':
IntegerProp(default=1, fill_brok=[FULL_STATUS]),
'state':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'ack_author':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'ack_data':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'escalated':
BoolProp(default=False, fill_brok=[FULL_STATUS]),
'command_call':
StringProp(default=None),
'contact':
StringProp(default=None),
'notif_nb':
IntegerProp(default=1),
'command':
StringProp(default=u'UNSET'),
'enable_environment_macros':
BoolProp(default=False),
# Keep a list of currently active escalations
'already_start_escalations':
ListProp(default=[]),
'type':
StringProp(default=u'PROBLEM'),
# For authored notifications (eg. downtime...)
'author':
StringProp(default=u'n/a', fill_brok=[FULL_STATUS]),
'author_name':
StringProp(default=u'n/a', fill_brok=[FULL_STATUS]),
'author_alias':
StringProp(default=u'n/a', fill_brok=[FULL_STATUS]),
'author_comment':
StringProp(default=u'n/a', fill_brok=[FULL_STATUS]),
# All contacts that were notified
'recipients':
ListProp(default=[])
})
macros = {
'NOTIFICATIONTYPE': 'type',
'NOTIFICATIONRECIPIENTS': 'recipients',
'NOTIFICATIONISESCALATED': 'escalated',
'NOTIFICATIONAUTHOR': 'author',
'NOTIFICATIONAUTHORNAME': 'author_name',
'NOTIFICATIONAUTHORALIAS': 'author_alias',
'NOTIFICATIONCOMMENT': 'author_comment',
'NOTIFICATIONNUMBER': 'notif_nb',
'NOTIFICATIONID': 'uuid',
'HOSTNOTIFICATIONNUMBER': 'notif_nb',
'HOSTNOTIFICATIONID': 'uuid',
'SERVICENOTIFICATIONNUMBER': 'notif_nb',
'SERVICENOTIFICATIONID': 'uuid'
}
def __init__(self, params=None, parsing=False):
super(Notification, self).__init__(params, parsing=parsing)
self.fill_default()
def __str__(self): # pragma: no cover
return "Notification %s, item: %s, type: %s, status: %s, command:'%s'" \
% (self.uuid, self.ref, self.type, self.status, self.command)
def is_administrative(self):
"""Check if this notification is "administrative"
:return: True if type is not in ('PROBLEM', 'RECOVERY'), False otherwise
:rtype: bool
"""
if self.type in ('PROBLEM', 'RECOVERY'):
return False
return True
def get_return_from(self, notif):
"""Setter of exit_status and execution_time attributes
:param notif: notification to get data from
:type notif: alignak.notification.Notification
:return: None
"""
self.exit_status = notif.exit_status
self.execution_time = notif.execution_time
def fill_data_brok_from(self, data, brok_type):
"""Fill data with info of item by looking at brok_type
in props of properties or running_properties
:param data: data to fill
:type data:
:param brok_type: type of brok
:type brok_type:
:return: brok with wanted data
:rtype: alignak.brok.Brok
"""
cls = self.__class__
# Now config properties
for prop, entry in list(cls.properties.items()):
if brok_type in entry.fill_brok:
data[prop] = getattr(self, prop)
def get_initial_status_brok(self, extra=None): # pylint: disable=unused-argument
"""Get a initial status brok
:param extra: not used by the notifications
:type extra: alignak.objects.item.Items
:return: brok with wanted data
:rtype: alignak.brok.Brok
"""
data = {'uuid': self.uuid}
self.fill_data_brok_from(data, FULL_STATUS)
return Brok({'type': 'notification_raise', 'data': data})
def serialize(self, no_json=True, printing=False):
"""This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Notification
:rtype: dict
"""
res = super(Notification, self).serialize()
# Do not serialize the command call
if 'command_call' in res:
res['command_call'] = 'n/a'
# logger.debug("Serialized notification: %s", res)
# if res['command_call'] is not None:
# if not isinstance(res['command_call'], string_types) and \
# not isinstance(res['command_call'], dict):
# res['command_call'] = res['command_call'].serialize()
return res
|
class Notification(Action):
'''Notification class, inherits from action class. Used to notify contacts
and execute notification command defined in configuration
'''
def __init__(self, params=None, parsing=False):
pass
def __str__(self):
pass
def is_administrative(self):
'''Check if this notification is "administrative"
:return: True if type is not in ('PROBLEM', 'RECOVERY'), False otherwise
:rtype: bool
'''
pass
def get_return_from(self, notif):
'''Setter of exit_status and execution_time attributes
:param notif: notification to get data from
:type notif: alignak.notification.Notification
:return: None
'''
pass
def fill_data_brok_from(self, data, brok_type):
'''Fill data with info of item by looking at brok_type
in props of properties or running_properties
:param data: data to fill
:type data:
:param brok_type: type of brok
:type brok_type:
:return: brok with wanted data
:rtype: alignak.brok.Brok
'''
pass
def get_initial_status_brok(self, extra=None):
'''Get a initial status brok
:param extra: not used by the notifications
:type extra: alignak.objects.item.Items
:return: brok with wanted data
:rtype: alignak.brok.Brok
'''
pass
def serialize(self, no_json=True, printing=False):
'''This function serializes into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Notification
:rtype: dict
'''
pass
| 8 | 6 | 10 | 1 | 4 | 6 | 2 | 0.51 | 1 | 3 | 1 | 0 | 7 | 3 | 7 | 22 | 162 | 23 | 94 | 19 | 86 | 48 | 32 | 18 | 24 | 3 | 4 | 2 | 11 |
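A minimal sketch of the Notification API shown above (the parameter values are illustrative):

from alignak.notification import Notification

notif = Notification({'type': 'PROBLEM', 'contact_name': 'admin',
                      'host_name': 'server-01'})
assert not notif.is_administrative()   # PROBLEM/RECOVERY are real notifications

ack = Notification({'type': 'ACKNOWLEDGEMENT'})
assert ack.is_administrative()

data = notif.serialize()   # plain dict, ready for HTTP transfer between daemons
# command_call, if present, is replaced by 'n/a' (see serialize() above)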
4,155 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/arbiterlink.py
|
alignak.objects.arbiterlink.ArbiterLink
|
class ArbiterLink(SatelliteLink):
"""
Class to manage the link to Arbiter daemon.
With it, a master arbiter can communicate with a spare Arbiter daemon
"""
my_type = 'arbiter'
my_name_property = "%s_name" % my_type
properties = SatelliteLink.properties.copy()
properties.update({
'type':
StringProp(default=u'arbiter', fill_brok=[FULL_STATUS], to_send=True),
'arbiter_name':
StringProp(default='', fill_brok=[FULL_STATUS]),
'host_name':
StringProp(default=socket.gethostname(), to_send=True),
'port':
IntegerProp(default=7770, to_send=True),
'last_master_speak':
FloatProp(default=0.0)
})
def is_me(self): # pragma: no cover, seems not to be used anywhere
"""Check if parameter name if same than name of this object
TODO: is it useful?
:return: true if parameter name if same than this name
:rtype: bool
"""
logger.info("And arbiter is launched with the hostname:%s "
"from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
def do_not_run(self):
"""Check if satellite running or not
If not, try to run
:return: true if satellite not running
:rtype: bool
"""
logger.debug("[%s] do_not_run", self.name)
try:
self.con.get('_do_not_run')
return True
except HTTPClientConnectionException as exp: # pragma: no cover, simple protection
self.add_failed_check_attempt("Connection error when "
"sending do not run: %s" % str(exp))
self.set_dead()
except HTTPClientTimeoutException as exp: # pragma: no cover, simple protection
self.add_failed_check_attempt("Connection timeout when "
"sending do not run: %s" % str(exp))
except HTTPClientException as exp:
self.add_failed_check_attempt("Error when "
"sending do not run: %s" % str(exp))
return False
|
class ArbiterLink(SatelliteLink):
'''
Class to manage the link to Arbiter daemon.
With it, a master arbiter can communicate with a spare Arbiter daemon
'''
def is_me(self):
'''Check if the parameter name is the same as the name of this object
TODO: is it useful?
:return: True if the parameter name is the same as this name
:rtype: bool
'''
pass
def do_not_run(self):
'''Check whether the satellite is running or not
If not, try to run
:return: True if the satellite is not running
:rtype: bool
'''
pass
| 3 | 3 | 18 | 3 | 10 | 7 | 3 | 0.47 | 1 | 4 | 3 | 0 | 2 | 0 | 2 | 72 | 58 | 8 | 36 | 7 | 33 | 17 | 21 | 6 | 18 | 4 | 4 | 1 | 5 |
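do_not_run() above treats one successful GET as "do not run" and records any client error as a failed check attempt before answering False. Below is a minimal sketch of that control flow; DummyConnection and the single ConnectionError branch are stand-ins, not the Alignak HTTP client, which distinguishes connection, timeout and generic errors.

class DummyConnection:
    """Hypothetical connection stub standing in for the HTTP client"""
    def __init__(self, fail=False):
        self.fail = fail

    def get(self, endpoint):
        if self.fail:
            raise ConnectionError("arbiter unreachable")

def do_not_run(con, failed_attempts):
    # One successful GET means this arbiter must not run
    try:
        con.get('_do_not_run')
        return True
    except ConnectionError as exp:
        # Record the failure, as add_failed_check_attempt() does above
        failed_attempts.append("Connection error when sending do not run: %s" % exp)
    return False

failed = []
assert do_not_run(DummyConnection(), failed) is True
assert do_not_run(DummyConnection(fail=True), failed) is False
assert len(failed) == 1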
4,156 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_macros_resolver.py
|
tests.test_macros_resolver.MacroResolverTester
|
class MacroResolverTester(object):
def get_hst_svc(self):
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
hst = self._scheduler.hosts.find_by_name("test_host_0")
return (svc, hst)
def test_resolv_simple(self):
"""Test a simple macro resolution
:return:
"""
# These are macros built from a variable declare in alignak.ini file
# ; Some macros for the tests
# $alignak_test_macro=test macro
# _alignak_test_macro2=test macro 2
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_TEST_MACRO$", [], None, None, None)
assert result == "test macro"
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_TEST_MACRO2$", [], None, None, None)
assert result == "test macro 2"
# These are macros read from a pack. section of the alignak.ini configuration
result = self.mr.resolve_simple_macros_in_string("$SMTP_SERVER$", [], None, None, None)
assert result == "your_smtp_server_address"
result = self.mr.resolve_simple_macros_in_string("$MAIL_FROM$", [], None, None, None)
assert result == "alignak@monitoring"
# This is a macro built from a variable that is a string
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK$", [], None, None, None)
assert result == "My Alignak"
# This is a macro built from a variable that is a list of strings
result = self.mr.resolve_simple_macros_in_string("$ALIGNAK_CONFIG$", [], None, None, None)
assert isinstance(result, string_types)
expected = "[%s]" % ','.join(self.alignak_env.cfg_files)
assert result == expected
# This is a macro built from a dynamic variable
result = self.mr.resolve_simple_macros_in_string("$MAINCONFIGFILE$", [], None, None, None)
assert result == os.path.abspath(os.path.join(self._test_dir, self.setup_file))
result = self.mr.resolve_simple_macros_in_string("$MAINCONFIGDIR$", [], None, None, None)
assert result == os.path.abspath(os.path.join(self._test_dir, './cfg'))
# This is an empty macro -> ''
result = self.mr.resolve_simple_macros_in_string("$COMMENTDATAFILE$", [], None, None, None)
assert result == ""
# This is a macro built from an Alignak variable - because the variable is prefixed with _
# The macro name is built from the uppercased variable name without the leading
# and trailing underscores: _dist -> $DIST$
result = self.mr.resolve_simple_macros_in_string("$DIST$", [], None, None, None)
assert result == "/tmp"
# Alignak variable interpolated from %(var) is available as a macro
result = self.mr.resolve_simple_macros_in_string("$DIST_ETC$", [], None, None, None)
assert result == "/tmp/etc/alignak"
# # Alignak "standard" variable is not available as a macro
# # Empty value! todo: Perhaps this should be changed?
# Sometimes the user is defined to alignak for test purposes and it remains set to this value!
# result = self.mr.resolve_simple_macros_in_string("$USER$", [], None, None, None)
# assert result == ""
def test_resolv_simple_command(self):
"""Test a simple command resolution
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
macros_command = self.mr.resolve_command(svc.check_command, data,
self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == "plugins/test_servicecheck.pl --type=ok --failchance=5% " \
"--previous-state=OK --state-duration=0 " \
"--total-critical-on-host=0 --total-warning-on-host=0 " \
"--hostname test_host_0 --servicedesc test_ok_0"
# @pytest.mark.skip(reason="A macro remains valued where all should be reset to default!")
def test_args_macro(self):
"""
Test ARGn macros
:return:
"""
print("Initial test macros: %d - %s" % (len(self._scheduler.pushed_conf.__class__.macros),
self._scheduler.pushed_conf.__class__.macros))
print(" - : %s" % (self._scheduler.pushed_conf.__class__.properties['$USER1$']))
print(" - : %s" % (self._scheduler.pushed_conf.properties['$USER1$']))
print(" - : %s" % (getattr(self._scheduler.pushed_conf, '$USER1$', None)))
for key in self._scheduler.pushed_conf.__class__.macros:
key = self._scheduler.pushed_conf.__class__.macros[key]
if key:
value = getattr(self._scheduler.pushed_conf.properties, key, '')
print(" - %s : %s" % (key, self._scheduler.pushed_conf.properties[key]))
if value:
print("- %s = %s" % (key, value))
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# command_with_args is defined with 5 arguments as:
# $PLUGINSDIR$/command -H $HOSTADDRESS$ -t 9 -u -c $ARG1$
# -a $ARG2$ $ARG3$ $ARG4$ and the last is $ARG5$.
# No arguments are provided - will be valued as empty strings
dummy_call = "command_with_args"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# todo: Test problem is here!
# Whereas we should get:
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c -a and the last is .'
# We get:
# assert macros_command == '/var/lib/shinken/libexec/command -H 127.0.0.1 -t 9 -u -c -a and the last is .'
# Outside the test env, everything is ok! The problem is that some tests executed
# beforehand leave the macro with an incorrect value!
# Extra arguments are provided - will be ignored
dummy_call = "command_with_args!arg_1!arg_2!arg_3!arg_4!arg_5!extra argument"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c arg_1 ' \
'-a arg_2 arg_3 arg_4 and the last is arg_5.'
# All arguments are provided
dummy_call = "command_with_args!arg_1!arg_2!arg_3!arg_4!arg_5"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/command -H 127.0.0.1 -t 9 -u -c arg_1 ' \
'-a arg_2 arg_3 arg_4 and the last is arg_5.'
def test_datetime_macros(self):
""" Test date / time macros: SHORTDATETIME, LONGDATETIME, DATE, TIME, ...
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
# Long and short datetime
dummy_call = "special_macro!$LONGDATETIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$SHORTDATETIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$DATE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$TIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
dummy_call = "special_macro!$TIMET$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Do not check that the output of these macro is correct
# because there is no specific macro code for those functions ;)
# Process and event start time
dummy_call = "special_macro!$PROCESSSTARTTIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing n/a' == macros_command
dummy_call = "special_macro!$EVENTSTARTTIME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing n/a' == macros_command
def test_summary_macros(self):
""" Test summary macros: TOTALHOSTSUP, TOTALHOSTDOWN, ...
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
# Number of hosts UP / DOWN / UNREACHABLE
dummy_call = "special_macro!$TOTALHOSTSUP$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# All 3 hosts are UP
assert 'plugins/nothing 3' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my host is DOWN and not yet handled
hst.state = 'DOWN'
hst.is_problem = True
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTSDOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALHOSTSDOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my host is DOWN but handled
hst.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALHOSTSDOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my host is UNREACHABLE and not yet handled
hst.state = 'UNREACHABLE'
hst.is_problem = True
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTSUNREACHABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALHOSTSUNREACHABLEUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my host is UNREACHABLE but handled
hst.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALHOSTSUNREACHABLEUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my host is DOWN and not yet handled
hst.state = 'DOWN'
hst.is_problem = True
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my host is UP and no more a problem
hst.state = 'UP'
hst.is_problem = False
hst.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALHOSTPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
dummy_call = "special_macro!$TOTALHOSTPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Number of services OK / WARNING / CRITICAL / UNKNOWN
dummy_call = "special_macro!$TOTALSERVICESOK$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 2' == macros_command
# Now my service is WARNING and not handled
svc.state = 'WARNING'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICESWARNING$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICESWARNINGUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service problem is handled
svc.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALSERVICESWARNINGUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my service is CRITICAL and not handled
svc.state = 'CRITICAL'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICESCRITICAL$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICESCRITICALUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service problem is handled
svc.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALSERVICESCRITICALUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my service is UNKNOWN and not handled
svc.state = 'UNKNOWN'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICESUNKNOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICESUNKNOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service problem is handled
svc.problem_has_been_acknowledged = True
dummy_call = "special_macro!$TOTALSERVICESUNKNOWNUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Now my service is WARNING and not handled
svc.state = 'WARNING'
svc.is_problem = True
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICEPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
dummy_call = "special_macro!$TOTALSERVICEPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Now my service is OK and no more a problem
svc.state = 'OK'
svc.is_problem = False
svc.problem_has_been_acknowledged = False
dummy_call = "special_macro!$TOTALSERVICEPROBLEMS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
dummy_call = "special_macro!$TOTALSERVICEPROBLEMSUNHANDLED$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
def test_special_macros_realm(self):
"""
Call the resolver with a special macro HOSTREALM
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
dummy_call = "special_macro!$HOSTREALM$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Macro raised the default realm (All)
assert 'plugins/nothing All' == macros_command
def test_escape_macro(self):
"""
Call the resolver with an empty macro ($$)
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
dummy_call = "special_macro!$$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Not a macro but $$ is transformed as $
assert 'plugins/nothing $' == macros_command
def test_unicode_macro(self):
"""
Call the resolver with a unicode content
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
hst.output = u"На берегу пустынных волн"
dummy_call = "special_macro!$HOSTOUTPUT$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Output is correctly restored
assert u'plugins/nothing На берегу пустынных волн' == macros_command
hst.state = 'UP'
hst.output = 'Père Noël'
dummy_call = "special_macro!$HOSTOUTPUT$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Output is correctly restored
assert u'plugins/nothing Père Noël' == macros_command
hst.state = 'UP'
hst.output = 'Père Noël'
dummy_call = "special_macro!$HOSTOUTPUT$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
# Output is correctly restored
assert u'plugins/nothing Père Noël' == macros_command
def test_illegal_macro_output_chars(self):
""" Check output macros are cleaned from illegal macro characters
$HOSTOUTPUT$, $HOSTPERFDATA$, $HOSTACKAUTHOR$, $HOSTACKCOMMENT$,
$SERVICEOUTPUT$, $SERVICEPERFDATA$, $SERVICEACKAUTHOR$, $SERVICEACKCOMMENT$
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
illegal_macro_output_chars = \
self._scheduler.pushed_conf.illegal_macro_output_chars
print("Illegal macros caracters:", illegal_macro_output_chars)
hst.output = 'fake output'
dummy_call = "special_macro!$HOSTOUTPUT$"
for c in illegal_macro_output_chars:
hst.output = 'fake output' + c
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
print(macros_command)
assert 'plugins/nothing fake output' == macros_command
def test_env_macros(self):
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
data.append(self._arbiter.conf)
# Macros exist in the environment with a prefix which defaults to ALIGNAK_
# but this prefix may be overridden in the configuration
# assert self.mr.env_prefix == 'ALIGNAK_'
env = self.mr.get_env_macros(data)
assert env != {}
assert 'test_host_0' == env['%sHOSTNAME' % self.mr.env_prefix]
assert 0.0 == env['%sSERVICEPERCENTCHANGE' % self.mr.env_prefix]
assert 'custvalue' == env['%s_SERVICECUSTNAME' % self.mr.env_prefix]
assert 'gnulinux' == env['%s_HOSTOSTYPE' % self.mr.env_prefix]
assert '%sUSER1' % self.mr.env_prefix not in env
def test_resource_file(self):
"""
Test macros defined in configuration files
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# $USER1$ macro is defined as 'plugins' in the configuration file
dummy_call = "special_macro!$USER1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing plugins' == macros_command
# $PLUGINSDIR$ macro is defined as $USER1$ in the configuration file
dummy_call = "special_macro!$PLUGINSDIR$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing plugins' == macros_command
# $INTERESTINGVARIABLE$ macro is defined as 'interesting_value' in the configuration file
dummy_call = "special_macro!$INTERESTINGVARIABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing interesting_value' == macros_command
# Look for multiple = in lines, should split the first
# and keep others in the macro value
dummy_call = "special_macro!$ANOTHERVALUE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing first=second' == macros_command
def test_ondemand_macros(self):
"""Test on-demand macros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
svc.state = 'UNKNOWN'
# Get another service
svc2 = self._scheduler.pushed_conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_another_service"
)
svc2.output = 'you should not pass'
# Request a not existing macro
dummy_call = "special_macro!$HOSTXXX:test_host_0$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing' == macros_command
# Request a specific host state
dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Call with an empty host name, meaning: my host
data = [hst]
dummy_call = "special_macro!$HOSTSTATE:$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Now with a service, for our implicit host state
data = [hst, svc]
dummy_call = "special_macro!$HOSTSTATE:test_host_0$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Now with a service, for our implicit host state (missing host ...)
data = [hst, svc]
dummy_call = "special_macro!$HOSTSTATE:$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing UP' == macros_command
# Now call this data from our previous service - get service state
data = [hst, svc2]
dummy_call = "special_macro!$SERVICESTATE:test_host_0:test_another_service$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing OK' == macros_command
# Now call this data from our previous service - get service output
data = [hst, svc2]
dummy_call = "special_macro!$SERVICEOUTPUT:test_host_0:test_another_service$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing you should not pass' == macros_command
# Ok now with a host implicit way
svc2.output = 'you should not pass'
data = [hst, svc2]
dummy_call = "special_macro!$SERVICEOUTPUT::test_another_service$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing you should not pass' == macros_command
def test_host_macros(self):
"""Test host macros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# First group name
dummy_call = "special_macro!$HOSTGROUPNAME$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing allhosts'
# All group names
dummy_call = "special_macro!$HOSTGROUPNAMES$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing allhosts,hostgroup_01,up'
# First group alias
dummy_call = "special_macro!$HOSTGROUPALIAS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing All Hosts'
# All group aliases
dummy_call = "special_macro!$HOSTGROUPALIASES$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert macros_command == 'plugins/nothing All Hosts,All Up Hosts,hostgroup_alias_01'
def test_host_count_services_macros(self):
"""Test services count for an hostmacros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
hst.state = 'UP'
# Get another service
svc2 = self._scheduler.pushed_conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_another_service"
)
svc2.output = 'you should not pass'
# Total
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICES$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 2' == macros_command
# Services states
svc.state_id = 0
svc.state = 'OK'
svc2.state_id = 1
svc2.state = 'WARNING'
# Ok
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESOK$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Warning
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESWARNING$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Critical
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESCRITICAL$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Unknown
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNKNOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Unreachable
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNREACHABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Change states
svc.state_id = 2
svc.state = 'CRITICAL'
svc2.state_id = 3
svc2.state = 'UNKNOWN'
# Ok
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESOK$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Warning
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESWARNING$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
# Critical
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESCRITICAL$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Unknown
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNKNOWN$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 1' == macros_command
# Unreachable
svc.output = 'you should not pass'
data = [hst, svc]
dummy_call = "special_macro!$TOTALHOSTSERVICESUNREACHABLE$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 0' == macros_command
def test_contact_custom_macros(self):
"""
Test on-demand macros with custom variables for contacts
:return:
"""
contact = self._scheduler.contacts.find_by_name("test_macro_contact")
data = [contact]
# Parse custom macro to get contact custom variables based upon a fixed value
# contact has a custom variable defined as _custom1 = value
dummy_call = "special_macro!$_CONTACTCUSTOM1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing value' == macros_command
# Parse custom macro to get contact custom variables based upon another macro
# contact has a custom variable defined as _custom2 = $CONTACTNAME$
dummy_call = "special_macro!$_CONTACTCUSTOM2$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing test_macro_contact' == macros_command
def test_host_custom_macros(self):
"""
Test on-demand macros with custom variables for hosts
:return:
"""
hst = self._scheduler.hosts.find_by_name("test_macro_host")
# The host has custom variables, thus we may use them in a macro
assert hst.customs != []
assert '_CUSTOM1' in hst.customs
assert '_CUSTOM2' in hst.customs
# Force declare an integer custom variable
hst.customs['_CUSTOM3'] = 10
print(hst.customs)
data = [hst]
# Parse custom macro to get host custom variables based upon a fixed value
# host has a custom variable defined as _custom1 = value
dummy_call = "special_macro!$_HOSTCUSTOM1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing value' == macros_command
# Parse custom macro to get host custom variables based upon another macro
# host has a custom variable defined as _custom2 = $HOSTNAME$
dummy_call = "special_macro!$_HOSTCUSTOM2$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing test_macro_host' == macros_command
# Parse custom macro to get host custom variables based upon an integer value
# host has a custom variable forced above as _custom3 = 10
dummy_call = "special_macro!$_HOSTCUSTOM3$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
print("Command: %s" % macros_command)
assert 'plugins/nothing 10' == macros_command
def test_service_custom_macros(self):
"""
Test on-demand macros with custom variables for services
:return:
"""
(svc, hst) = self.get_hst_svc()
# Get the second service
svc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_another_service"
)
data = [hst, svc2]
# Parse custom macro to get service custom variables based upon a fixed value
# special_macro is defined as: $USER1$/nothing $ARG1$
dummy_call = "special_macro!$_SERVICECUSTOM1$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing value' == macros_command
# Parse custom macro to get service custom variables based upon another macro
dummy_call = "special_macro!$_SERVICECUSTOM2$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing test_host_0' == macros_command
def test_hostadressX_macros(self):
"""
Host addresses macros
:return:
"""
(svc, hst) = self.get_hst_svc()
data = [hst, svc]
# Ok sample host call
dummy_call = "special_macro!$HOSTADDRESS$"
cc = CommandCall({"commands": self._arbiter.conf.commands, "command_line": dummy_call}, parsing=True)
macros_command = self.mr.resolve_command(cc, data, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert 'plugins/nothing 127.0.0.1' == macros_command
|
class MacroResolverTester(object):
def get_hst_svc(self):
pass
def test_resolv_simple(self):
'''Test a simple macro resolution
:return:
'''
pass
def test_resolv_simple_command(self):
'''Test a simple command resolution
:return:
'''
pass
def test_args_macro(self):
'''
Test ARGn macros
:return:
'''
pass
def test_datetime_macros(self):
''' Test date / time macros: SHORTDATETIME, LONGDATETIME, DATE, TIME, ...
:return:
'''
pass
def test_summary_macros(self):
''' Test summary macros: TOTALHOSTSUP, TOTALHOSTDOWN, ...
:return:
'''
pass
def test_special_macros_realm(self):
'''
Call the resolver with a special macro HOSTREALM
:return:
'''
pass
def test_escape_macro(self):
'''
Call the resolver with an empty macro ($$)
:return:
'''
pass
def test_unicode_macro(self):
'''
Call the resolver with a unicode content
:return:
'''
pass
def test_illegal_macro_output_chars(self):
''' Check output macros are cleaned from illegal macro characters
$HOSTOUTPUT$, $HOSTPERFDATA$, $HOSTACKAUTHOR$, $HOSTACKCOMMENT$,
$SERVICEOUTPUT$, $SERVICEPERFDATA$, $SERVICEACKAUTHOR$, $SERVICEACKCOMMENT$
'''
pass
def test_env_macros(self):
pass
def test_resource_file(self):
'''
Test macros defined in configuration files
:return:
'''
pass
def test_ondemand_macros(self):
'''Test on-demand macros
:return:
'''
pass
def test_host_macros(self):
'''Test host macros
:return:
'''
pass
def test_host_count_services_macros(self):
'''Test the services count macros for a host
:return:
'''
pass
def test_contact_custom_macros(self):
'''
Test on-demand macros with custom variables for contacts
:return:
'''
pass
def test_host_custom_macros(self):
'''
Test on-demand macros with custom variables for hosts
:return:
'''
pass
def test_service_custom_macros(self):
'''
Test on-demand macros with custom variables for services
:return:
'''
pass
def test_hostadressX_macros(self):
'''
Host addresses macros
:return:
'''
pass
| 20 | 17 | 44 | 4 | 32 | 9 | 1 | 0.28 | 1 | 1 | 1 | 2 | 19 | 0 | 19 | 19 | 864 | 93 | 601 | 112 | 581 | 170 | 508 | 112 | 488 | 4 | 1 | 3 | 23 |
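The resolver tests above all revolve around substituting $NAME$ patterns in a command line. Below is a minimal sketch of that substitution, assuming a plain dict of macro values; the real MacroResolver also handles on-demand, custom and argument macros.

import re

def resolve_macros(line, macros):
    """Replace $NAME$ patterns with values from a dict; $$ escapes to $"""
    def replace(match):
        name = match.group(1)
        if name == '':
            return '$'  # "$$" is not a macro, it is a literal dollar sign
        return str(macros.get(name, ''))
    return re.sub(r'\$([A-Z0-9_:.]*)\$', replace, line)

macros = {'HOSTNAME': 'test_host_0', 'HOSTSTATE': 'UP'}
assert resolve_macros('$HOSTNAME$ is $HOSTSTATE$', macros) == 'test_host_0 is UP'
assert resolve_macros('cost is 5$$', macros) == 'cost is 5$'
assert resolve_macros('$UNDEFINED$', macros) == ''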
4,157 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_poller_tag.py
|
tests.test_poller_tag.TestPollerTag
|
class TestPollerTag(AlignakTest):
"""This class tests the poller tag of check
"""
def setUp(self):
super(TestPollerTag, self).setUp()
self.setup_with_file('cfg/cfg_poller_tag.cfg',
'cfg/poller_tag/alignak.ini',
dispatching=True)
self.assertTrue(self.conf_is_correct)
# Our scheduler
self._sched = self._scheduler
# No error messages
self.assertEqual(len(self.configuration_errors), 0)
# No warning messages
self.assertEqual(len(self.configuration_warnings), 0)
def test_poller_tag_command(self):
"""We have a command defined with poller_tag: north
:return:None
"""
host = self._sched.hosts.find_by_name("test_host_pt_01")
self.external_command_loop()
checks = list(self._scheduler.checks.values())
mycheck = self._sched.checks[host.checks_in_progress[0]]
assert mycheck.poller_tag == 'north'
def test_poller_tag_host(self):
"""We have a host with a poller_tag: south
:return: None
"""
host = self._sched.hosts.find_by_name("test_host_pt_02")
self.external_command_loop()
checks = list(self._scheduler.checks.values())
mycheck = self._sched.checks[host.checks_in_progress[0]]
assert mycheck.poller_tag == 'south'
def test_poller_tag_host_command(self):
"""We have a command with poller_tag: north
and a host with poller_tag: south
:return: None
"""
host = self._sched.hosts.find_by_name("test_host_pt_03")
self.external_command_loop()
checks = list(self._scheduler.checks.values())
mycheck = self._sched.checks[host.checks_in_progress[0]]
assert mycheck.poller_tag == 'south'
def test_poller_tag_service(self):
"""We have a service with a poller_tag: north
:return: None
"""
svc = self._sched.services.find_srv_by_name_and_hostname("test_router_0", "test_ok_pt_01")
svc.checks_in_progress = []
svc.act_depend_of = []
self.external_command_loop()
checks = list(self._scheduler.checks.values())
mycheck = self._sched.checks[svc.checks_in_progress[0]]
assert mycheck.poller_tag == 'north'
def test_poller_tag_service_command(self):
"""We have a service with a poller_tag: south
and a command with poller_tag: north
:return: None
"""
svc = self._sched.services.find_srv_by_name_and_hostname("test_router_0", "test_ok_pt_02")
svc.checks_in_progress = []
svc.act_depend_of = []
self.external_command_loop()
checks = list(self._scheduler.checks.values())
mycheck = self._sched.checks[svc.checks_in_progress[0]]
assert mycheck.poller_tag == 'south'
def test_poller_tag_service_host(self):
"""We have a service with a poller_tag: north
and a host with poller_tag: south
:return: None
"""
svc = self._sched.services.find_srv_by_name_and_hostname("test_host_pt_02", "test_ok_pt_03")
svc.checks_in_progress = []
svc.act_depend_of = []
self.external_command_loop()
checks = list(self._scheduler.checks.values())
mycheck = self._sched.checks[svc.checks_in_progress[0]]
assert mycheck.poller_tag == 'north'
def test_poller_master_get_checks(self):
"""Test function get right checks based on the poller_tag: None (it's the default tag)
:return: None
"""
self.external_command_loop()
for check in list(self._sched.checks.values()):
check.t_to_go = 0
checks = self._sched.get_to_run_checks(do_checks=True, poller_tags=['None'],
module_types=['fork'])
print(checks)
assert len(checks) == 3
for check in checks:
assert check.poller_tag == 'None'
def test_poller_north_get_checks(self):
"""Test function get right checks based on the poller_tag: north
:return: None
"""
self.external_command_loop()
for check in list(self._sched.checks.values()):
check.t_to_go = 0
checks = self._sched.get_to_run_checks(do_checks=True, poller_tags=['north'],
module_types=['fork'])
print(checks)
assert len(checks) == 3
for check in checks:
assert check.poller_tag == 'north'
def test_poller_south_get_checks(self):
"""
Test that the function gets the right checks based on the poller_tag: south
:return: None
"""
self.external_command_loop()
for check in list(self._sched.checks.values()):
check.t_to_go = 0
checks = self._sched.get_to_run_checks(do_checks=True, poller_tags=['south'],
module_types=['fork'])
print(checks)
assert len(checks) == 4
for check in checks:
assert check.poller_tag == 'south'
|
class TestPollerTag(AlignakTest):
'''This class tests the poller_tag of checks
'''
def setUp(self):
pass
def test_poller_tag_command(self):
'''We have a command defined with poller_tag: north
:return:None
'''
pass
def test_poller_tag_host(self):
'''We have a host with a poller_tag: south
:return: None
'''
pass
def test_poller_tag_host_command(self):
'''We have a command with poller_tag: north
and a host with poller_tag: south
:return: None
'''
pass
def test_poller_tag_service(self):
'''We have a service with a poller_tag: north
:return: None
'''
pass
def test_poller_tag_service_command(self):
'''We have a service with a poller_tag: south
and a command with poller_tag: north
:return: None
'''
pass
def test_poller_tag_service_host(self):
'''We have a service with a poller_tag: north
and a host with poller_tag: south
:return: None
'''
pass
def test_poller_master_get_checks(self):
'''Test that the function gets the right checks based on the poller_tag: None (the default tag)
:return: None
'''
pass
def test_poller_north_get_checks(self):
'''Test that the function gets the right checks based on the poller_tag: north
:return: None
'''
pass
def test_poller_south_get_checks(self):
'''
Test that the function gets the right checks based on the poller_tag: south
:return: None
'''
pass
| 11 | 10 | 13 | 1 | 8 | 3 | 2 | 0.44 | 1 | 2 | 0 | 0 | 10 | 1 | 10 | 65 | 138 | 20 | 82 | 36 | 71 | 36 | 77 | 36 | 66 | 3 | 2 | 1 | 16 |
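The tag tests above pin down a precedence rule: a check inherits the most specific poller_tag, the service's tag first, then the host's, then the command's, with 'None' as the default. Below is a minimal sketch of that rule, under the assumption that unset tags are represented by the string 'None' as in the tests; this is an illustration of the observed behaviour, not the Alignak code.

def effective_poller_tag(command_tag='None', host_tag='None', service_tag=None):
    """Return the poller_tag a check would inherit (sketch, not the Alignak code)"""
    if service_tag not in (None, 'None'):
        return service_tag
    if host_tag != 'None':
        return host_tag
    return command_tag

# A command tag alone applies; a host tag overrides it; a service tag wins over both
assert effective_poller_tag(command_tag='north') == 'north'
assert effective_poller_tag(command_tag='north', host_tag='south') == 'south'
assert effective_poller_tag(host_tag='south', service_tag='north') == 'north'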
4,158 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_perfdata_parsing.py
|
tests.test_perfdata_parsing.TestPerfdataParsing
|
class TestPerfdataParsing(AlignakTest):
""" Test performance data parsing """
def setUp(self):
super(TestPerfdataParsing, self).setUp()
def test_perfdata_parsing(self):
""" Test parsing performance data
"""
# Get a metric from a string
perf_data_string = 'ramused=90%;85;95;;'
metric = Metric(perf_data_string)
assert 'ramused' == metric.name
assert 90 == metric.value
assert '%' == metric.uom
assert 85 == metric.warning
assert 95 == metric.critical
assert 0 == metric.min
assert 100 == metric.max
# Get only the first metric if several are present
perf_data_string = 'ramused=1009MB;;;0;1982 ' \
'swapused=540MB;;;0;3827 ' \
'memused=1550MB;2973;3964;0;5810'
metric = Metric(perf_data_string)
assert 'ramused' == metric.name
assert 1009 == metric.value
assert 'MB' == metric.uom
assert None == metric.warning
assert None == metric.critical
assert 0 == metric.min
assert 1982 == metric.max
# Get performance data from a string
perf_data_string = 'ramused=1009MB;;;0;1982 ' \
'swapused=540MB;;;; ' \
'memused=90%'
perf_data = PerfDatas(perf_data_string)
# Get a metrics dictionary
assert isinstance(perf_data.metrics, dict)
assert 3 == len(perf_data)
metric = perf_data['ramused']
assert 'ramused' == metric.name
assert 1009 == metric.value
assert 'MB' == metric.uom
assert None == metric.warning
assert None == metric.critical
assert 0 == metric.min
assert 1982 == metric.max
metric = perf_data['swapused']
assert 'swapused' == metric.name
assert 540 == metric.value
assert 'MB' == metric.uom
assert None == metric.warning
assert None == metric.critical
assert None == metric.min
assert None == metric.max
metric = perf_data['memused']
assert 'memused' == metric.name
assert 90 == metric.value
assert '%' == metric.uom
assert None == metric.warning
assert None == metric.critical
assert 0 == metric.min
assert 100 == metric.max
def test_perfdata_space_characters(self):
""" Create a perfdata with name containing space
"""
# Metrics name can contain space characters
perf_data_string = "'Physical Memory Used'=12085620736Bytes; " \
"'Physical Memory Utilisation'=94%;80;90;"
perf_data = PerfDatas(perf_data_string)
# Get a metrics dictionary
assert isinstance(perf_data.metrics, dict)
assert 2 == len(perf_data)
metric = perf_data['Physical Memory Used']
assert 'Physical Memory Used' == metric.name
assert 12085620736 == metric.value
assert 'Bytes' == metric.uom
assert None is metric.warning
assert None is metric.critical
assert None is metric.min
assert None is metric.max
metric = perf_data['Physical Memory Utilisation']
assert 'Physical Memory Utilisation' == metric.name
assert 94 == metric.value
assert '%' == metric.uom
assert 80 == metric.warning
assert 90 == metric.critical
assert 0 == metric.min
assert 100 == metric.max
def test_perfdata_special_characters(self):
""" Create a perfdata with name containing special characters
"""
# Metrics name can contain special characters
perf_data_string = "'C: Space'=35.07GB; 'C: Utilisation'=87.7%;90;95;"
perf_data = PerfDatas(perf_data_string)
# Get a metrics dictionary
assert isinstance(perf_data.metrics, dict)
assert 2 == len(perf_data)
metric = perf_data['C: Space']
assert 'C: Space' == metric.name
assert 35.07 == metric.value
assert 'GB' == metric.uom
assert None is metric.warning
assert None is metric.critical
assert None is metric.min
assert None is metric.max
metric = perf_data['C: Utilisation']
assert 'C: Utilisation' == metric.name
assert 87.7 == metric.value
assert '%' == metric.uom
assert 90 == metric.warning
assert 95 == metric.critical
assert 0 == metric.min
assert 100 == metric.max
# Metrics name can contain special characters
perf_data_string = "'C: used'=13.06452GB;22.28832;25.2601;0;29.71777 " \
"'C: used %'=44%;75;85;0;100"
perf_data = PerfDatas(perf_data_string)
# Get a metrics dictionary
assert isinstance(perf_data.metrics, dict)
assert 2 == len(perf_data)
metric = perf_data['C: used']
assert 'C: used' == metric.name
assert 13.06452 == metric.value
assert 'GB' == metric.uom
assert 22.28832 == metric.warning
assert 25.2601 == metric.critical
assert 0 is metric.min
assert 29.71777 == metric.max
metric = perf_data['C: used %']
assert 'C: used %' == metric.name
assert 44 == metric.value
assert '%' == metric.uom
assert 75 == metric.warning
assert 85 == metric.critical
assert 0 is metric.min
assert 100 == metric.max
def test_perfdata_floating_value(self):
""" Create a perfdata with complex floating value
"""
# Metrics value can contain complex floating value
perf_data_string = "time_offset-192.168.0.1=-7.22636468709e-05s;1;2;0;;"
perf_data = PerfDatas(perf_data_string)
# Get a metrics dictionary
assert isinstance(perf_data.metrics, dict)
assert 1 == len(perf_data)
metric = perf_data['time_offset-192.168.0.1']
assert 'time_offset-192.168.0.1' == metric.name
assert -7.22636468709e-05 == metric.value
assert 's' == metric.uom
assert 1 == metric.warning
assert 2 == metric.critical
assert 0 == metric.min
assert None is metric.max
def test_perfdata_accented_characters(self):
""" Create a perfdata with accented characters
"""
# Metrics name can contain accented and special characters
perf_data_string = "àéèï-192.168.0.1=-7.22636468709e-05s;1;2;0;;"
perf_data = PerfDatas(perf_data_string)
# Get a metrics dictionary
assert isinstance(perf_data.metrics, dict)
assert 1 == len(perf_data)
metric = perf_data['àéèï-192.168.0.1']
assert metric.name == 'àéèï-192.168.0.1'
assert metric.value == -7.22636468709e-05
assert metric.uom == 's'
assert metric.warning == 1
assert metric.critical == 2
assert metric.min == 0
assert metric.max == None
def test_perfdata_empty_string(self):
""" Create a perfdata from an empty string
"""
perf_data_string = None
perf_data = PerfDatas(perf_data_string)
assert len(perf_data) == 0
perf_data_string = ''
perf_data = PerfDatas(perf_data_string)
assert len(perf_data) == 0
|
class TestPerfdataParsing(AlignakTest):
''' Test performance data parsing '''
def setUp(self):
pass
def test_perfdata_parsing(self):
''' Test parsing performance data
'''
pass
def test_perfdata_space_characters(self):
''' Create a perfdata with name containing space
'''
pass
def test_perfdata_special_characters(self):
''' Create a perfdata with name containing special characters
'''
pass
def test_perfdata_floating_value(self):
''' Create a perfdata with a complex floating-point value
'''
pass
def test_perfdata_accented_characters(self):
''' Create a perfdata with accented characters
'''
pass
def test_perfdata_empty_string(self):
''' Create a perfdata from an empty string
'''
pass
| 8 | 7 | 27 | 2 | 21 | 4 | 1 | 0.18 | 1 | 4 | 2 | 0 | 7 | 0 | 7 | 62 | 199 | 21 | 151 | 25 | 143 | 27 | 145 | 25 | 137 | 1 | 2 | 0 | 7 |
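The assertions above follow the Nagios plugin perfdata format, 'name'=value[uom];warn;crit;min;max, with a 0..100 range implied for '%' units. Below is a minimal sketch of parsing one such item; the real Metric class is more permissive, and parse_metric is a hypothetical helper.

import re

def parse_metric(item):
    """Parse one perfdata item into a dict (sketch of the format, not the Alignak class)"""
    match = re.match(r"^'?([^'=]+)'?=([-\d.eE]+)([^;]*)"
                     r"(?:;([^;]*))?(?:;([^;]*))?(?:;([^;]*))?(?:;([^;]*))?$",
                     item.strip())
    if not match:
        return None

    def to_num(text):
        return float(text) if text not in (None, '') else None

    name, value, uom, warn, crit, vmin, vmax = match.groups()
    metric = {'name': name, 'value': float(value), 'uom': uom,
              'warning': to_num(warn), 'critical': to_num(crit),
              'min': to_num(vmin), 'max': to_num(vmax)}
    if uom == '%':
        # Percentages imply a 0..100 range, as the tests above assert
        metric['min'], metric['max'] = 0, 100
    return metric

m = parse_metric('ramused=90%;85;95;;')
assert (m['value'], m['warning'], m['critical'], m['min'], m['max']) == \
    (90.0, 85.0, 95.0, 0, 100)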
4,159 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_perfdata_commands.py
|
tests.test_perfdata_commands.TestPerfdataCommands
|
class TestPerfdataCommands(AlignakTest):
"""
This class tests the performance data commands that can be attached to hosts or services
"""
def setUp(self):
super(TestPerfdataCommands, self).setUp()
self.setup_with_file('cfg/cfg_perfdata_commands.cfg', dispatching=True)
assert self.conf_is_correct
def test_service_perfdata_command(self):
"""
Test the service performance data command
:return:
"""
self._sched = self._scheduler
# We want an event handler (the perfdata command) to be put in the actions dict
# after we got a service check
host = self._sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# --------------------------------------------------------------
# initialize host/service state
# --------------------------------------------------------------
# Check we have a real command, not only a string
assert isinstance(svc.__class__.perfdata_command, CommandCall)
# Get a service check with perfdata
self.scheduler_loop(1, [[svc, 0, 'OK | percent=99%']])
# The event handler is raised to be launched
self.assert_actions_count(1)
self.assert_actions_match(0, '/submit_service_result', 'command')
self.show_and_clear_actions()
# Now, disable the perfdata management
cmd = "[%lu] DISABLE_PERFORMANCE_DATA" % int(time.time())
self._sched.run_external_commands([cmd])
# Get a service check with perfdata
self.scheduler_loop(1, [[svc, 0, 'OK | percent=99%']])
# No actions
self.assert_actions_count(0)
def test_host_perfdata_command(self):
"""
Test the host performance data command
:return:
"""
self._sched = self._scheduler
# We want an event handler (the perfdata command) to be put in the actions dict
# after we got a service check
host = self._sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# --------------------------------------------------------------
# initialize host/service state
# --------------------------------------------------------------
# Check we have a real command, not only a string
assert isinstance(host.perfdata_command, CommandCall)
# Get a host check with perfdata
self.scheduler_loop(1, [[host, 0, 'UP | percent=99%']])
# The event handler is raised to be launched
self.assert_actions_count(1)
self.assert_actions_match(0, '/submit_host_result', 'command')
self.show_and_clear_actions()
# Now, disable the perfdata management
cmd = "[%lu] DISABLE_PERFORMANCE_DATA" % int(time.time())
self._sched.run_external_commands([cmd])
# Get a host check with perfdata
self.scheduler_loop(1, [[host, 0, 'UP | percent=99%']])
# No actions
self.assert_actions_count(0)
def test_multiline_perfdata(self):
"""
Test with performance data on several lines
:return:
"""
self._sched = self._scheduler
# We want an event handler (the perfdata command) to be put in the actions dict
# after we got a service check
host = self._sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
# --------------------------------------------------------------
# initialize host/service state
# --------------------------------------------------------------
# Check we have a real command, not only a string
assert isinstance(svc.perfdata_command, CommandCall)
# Get a service check with perfdata
output = """ DISK OK - free space: / 3326 MB (56%); | /=2643MB;5948;5958;0;5968
/ 15272 MB (77%);
/boot 68 MB (69%);
/home 69357 MB (27%);
/var/log 819 MB (84%); | /boot=68MB;88;93;0;98
/home=69357MB;253404;253409;0;253414
/var/log=818MB;970;975;0;980
"""
# Simulate a check execution
self.fake_check(svc, 0, output)
# Consume simulated check
self.scheduler_loop(1, [])
assert isinstance(svc, SchedulingItem)
print("Actions", self._sched.actions)
print('Output', svc.output)
print('Long output', svc.long_output)
print('Performance data', svc.perf_data)
# Note that the check output is stripped
assert svc.output == 'DISK OK - free space: / 3326 MB (56%);'
# The check long output is also stripped
assert svc.long_output == '/ 15272 MB (77%);\n' \
'/boot 68 MB (69%);\n' \
'/home 69357 MB (27%);\n' \
'/var/log 819 MB (84%);'
# And the performance data are also stripped
assert svc.perf_data == '/=2643MB;5948;5958;0;5968 ' \
'/boot=68MB;88;93;0;98 ' \
'/home=69357MB;253404;253409;0;253414 ' \
'/var/log=818MB;970;975;0;980'
# The event handler is raised to be launched
self.assert_actions_count(1)
self.assert_actions_match(0, '/submit_service_result', 'command')
self.show_and_clear_actions()
|
class TestPerfdataCommands(AlignakTest):
'''
This class tests the performance data commands that can be attached to hosts or services
'''
def setUp(self):
pass
def test_service_perfdata_command(self):
'''
Test the service performance data command
:return:
'''
pass
def test_host_perfdata_command(self):
'''
Test the host performance data command
:return:
'''
pass
def test_multiline_perfdata(self):
'''
Test with performance data on several lines
:return:
'''
pass
| 5 | 4 | 36 | 6 | 19 | 13 | 1 | 0.75 | 1 | 3 | 1 | 0 | 4 | 1 | 4 | 59 | 151 | 26 | 75 | 15 | 70 | 56 | 62 | 15 | 57 | 1 | 2 | 0 | 4 |
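The multiline test above encodes the Nagios plugin output convention: the first line carries the short output plus an optional perf data block after '|'; following lines are long output until a second '|' opens another perf data block that runs to the end. A minimal standalone parser sketch of that convention (not Alignak's actual implementation) which reproduces the splits the test asserts:

import re

def parse_plugin_output(raw):
    """Split raw check output into (output, long_output, perf_data)."""
    lines = raw.strip().splitlines()
    first, _, perf = lines[0].partition('|')
    long_lines, extra_perf = [], []
    in_perf = False
    for line in lines[1:]:
        if not in_perf and '|' in line:
            text, _, p = line.partition('|')
            long_lines.append(text.strip())
            extra_perf.append(p.strip())
            in_perf = True            # everything after this is perf data
        elif in_perf:
            extra_perf.append(line.strip())
        else:
            long_lines.append(line.strip())
    perf_data = ' '.join([perf.strip()] + extra_perf).strip()
    return first.strip(), '\n'.join(long_lines), perf_data

# Mirrors the test's expectations on a reduced sample:
assert parse_plugin_output('OK | a=1\nmore\nlast | b=2\nc=3') == \
    ('OK', 'more\nlast', 'a=1 b=2 c=3')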
4,160 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_properties_override.py
|
tests.test_properties_override.TestPropertyOverrideConfigBroken
|
class TestPropertyOverrideConfigBroken(AlignakTest):
def setUp(self):
super(TestPropertyOverrideConfigBroken, self).setUp()
def test_service_property_override_errors(self):
""" Property override broken """
with pytest.raises(SystemExit):
self.setup_with_file('cfg/cfg_property_override_broken.cfg')
assert not self.conf_is_correct
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_02] Configuration is incorrect;"
))
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_02] invalid service override syntax: fake value"
))
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_02] trying to override property 'retry_interval' on service "
"'fakesrv' but it's unknown for this host"
))
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_02] trying to override 'host_name', a forbidden property for service 'proc proc2'"
))
self.assert_any_cfg_log_match(
"hosts configuration is incorrect!")
|
class TestPropertyOverrideConfigBroken(AlignakTest):
def setUp(self):
pass
def test_service_property_override_errors(self):
''' Property override broken '''
pass
| 3 | 1 | 12 | 1 | 11 | 1 | 1 | 0.05 | 1 | 2 | 0 | 0 | 2 | 0 | 2 | 57 | 26 | 3 | 22 | 3 | 19 | 1 | 12 | 3 | 9 | 1 | 2 | 1 | 2 |
4,161 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/calendars.py
|
django_databrowse.plugins.calendars.DayView
|
class DayView(DateViewMixin, dates.DayArchiveView):
template_name = 'databrowse/calendar_day.html'
|
class DayView(DateViewMixin, dates.DayArchiveView):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,162 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/datastructures.py
|
django_databrowse.datastructures.EasyChoice
|
class EasyChoice(object):
def __init__(self, easy_model, field, value, label):
self.model, self.field = easy_model, field
self.value, self.label = value, label
def __repr__(self):
return smart_text(u'<EasyChoice for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def url(self):
return mark_safe('%s%s/%s/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.field.name,
iri_to_uri(self.value)))
|
class EasyChoice(object):
def __init__(self, easy_model, field, value, label):
pass
def __repr__(self):
pass
def url(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 4 | 3 | 3 | 17 | 2 | 15 | 6 | 11 | 0 | 8 | 6 | 4 | 1 | 1 | 0 | 3 |
4,163 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/calendars.py
|
django_databrowse.plugins.calendars.IndexView
|
class IndexView(DateViewMixin, dates.ArchiveIndexView):
template_name = 'databrowse/calendar_main.html'
|
class IndexView(DateViewMixin, dates.ArchiveIndexView):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,164 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/datastructures.py
|
django_databrowse.datastructures.EasyField
|
class EasyField(object):
def __init__(self, easy_model, field):
self.model, self.field = easy_model, field
def __repr__(self):
return smart_text(u'<EasyField for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def choices(self):
for value, label in self.field.choices:
yield EasyChoice(self.model, self, value, label)
def url(self):
if self.field.choices:
return mark_safe('%s%s/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.name))
elif self.field.rel:
return mark_safe('%s%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name))
|
class EasyField(object):
def __init__(self, easy_model, field):
pass
def __repr__(self):
pass
def choices(self):
pass
def url(self):
pass
| 5 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 4 | 2 | 4 | 4 | 25 | 3 | 22 | 7 | 17 | 0 | 12 | 7 | 7 | 3 | 1 | 1 | 7 |
4,165 |
Alir3z4/django-databrowse
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alir3z4_django-databrowse/django_databrowse/tests/__init__.py
|
django_databrowse.tests.SomeModel.Meta
|
class Meta:
db_table = "somemodel"
|
class Meta:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
4,166 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/__init__.py
|
django_databrowse.tests.SomeOtherModel
|
class SomeOtherModel(models.Model):
some_other_field = models.CharField(max_length=50)
def __unicode__(self):
return self.some_other_field
|
class SomeOtherModel(models.Model):
def __unicode__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
4,167 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/__init__.py
|
django_databrowse.tests.YetAnotherModel
|
class YetAnotherModel(models.Model):
yet_another_field = models.CharField(max_length=50)
def __unicode__(self):
return self.yet_another_field
|
class YetAnotherModel(models.Model):
def __unicode__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
4,168 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/test_datastructures.py
|
django_databrowse.tests.test_datastructures.EasyChoiceTest
|
class EasyChoiceTest(TestCase):
def test_repr(self):
em = EasyModel(django_databrowse.site, SomeModel)
field = models.CharField(max_length=2, name="Hello")
value, label = "a", "azerty"
ec = EasyChoice(em, field, value, label)
self.assertEqual(ec.__repr__(), "<EasyChoice for SomeModel.Hello>")
|
class EasyChoiceTest(TestCase):
def test_repr(self):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 3 | 3 | 0 | 1 | 0 | 1 | 1 | 8 | 1 | 7 | 6 | 5 | 0 | 7 | 6 | 5 | 1 | 1 | 0 | 1 |
4,169 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/test_datastructures.py
|
django_databrowse.tests.test_datastructures.EasyFieldTest
|
class EasyFieldTest(TestCase):
def test_repr(self):
em = EasyModel(django_databrowse.site, SomeModel)
field = EasyField(em, models.CharField(max_length=50, name="hello"))
self.assertEqual(field.__repr__(), '<EasyField for SomeModel.hello>')
def test_choices(self):
em = EasyModel(django_databrowse.site, SomeModel)
field = EasyField(
em,
models.CharField(max_length=2,
choices=(("a", "azerty"),("q","querty"))
)
)
self.assertEqual(len([f for f in field.choices()]), 2)
def test_urls(self):
em = EasyModel(django_databrowse.site, SomeModel)
em.site.root_url = "root/"
field = EasyField(
em,
models.CharField(max_length=2,
choices=(("a", "azerty"),("q","querty")),
name="hello"
)
)
self.assertEqual(field.url(),
u'root/django_databrowse/somemodel/hello/')
em = EasyModel(django_databrowse.site, SomeInheritedModel)
field = EasyField(em, models.ForeignKey(SomeModel))
self.assertEqual(field.url(),
u'root/django_databrowse/someinheritedmodel/')
|
class EasyFieldTest(TestCase):
def test_repr(self):
pass
def test_choices(self):
pass
def test_urls(self):
pass
| 4 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 4 | 4 | 0 | 3 | 0 | 3 | 3 | 34 | 4 | 30 | 10 | 26 | 0 | 17 | 10 | 13 | 1 | 1 | 0 | 3 |
4,170 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/test_datastructures.py
|
django_databrowse.tests.test_datastructures.EasyInstanceTest
|
class EasyInstanceTest(TestCase):
def test_repr(self):
instance = SomeModel.objects.create()
ei = EasyInstance(EasyModel(django_databrowse.site,
SomeModel), instance)
self.assertEqual(ei.__repr__(), "<EasyInstance for SomeModel (1)>")
|
class EasyInstanceTest(TestCase):
def test_repr(self):
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 3 | 3 | 0 | 1 | 0 | 1 | 1 | 7 | 1 | 6 | 4 | 4 | 0 | 5 | 4 | 3 | 1 | 1 | 0 | 1 |
4,171 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/test_datastructures.py
|
django_databrowse.tests.test_datastructures.EasyModelTest
|
class EasyModelTest(TestCase):
@classmethod
def setUpClass(self):
django_databrowse.site.register(SomeModel)
@classmethod
def tearDownClass(self):
django_databrowse.site.unregister(SomeModel)
def test_repr(self):
em = EasyModel(django_databrowse.site, SomeModel)
self.assertEqual(em.__repr__(), "<EasyModel for SomeModel>")
def test_model_databrowse(self):
em = EasyModel(django_databrowse.site, SomeModel)
self.assertEqual(em.model_databrowse(), DefaultModelDatabrowse)
def test_url(self):
em = EasyModel(django_databrowse.site, SomeModel)
em.site.root_url = "root/"
self.assertEqual(em.url(), u'root/django_databrowse/somemodel/')
def test_manager(self):
em = EasyModel(django_databrowse.site, SomeModel)
self.assertIsInstance(em.objects(), EasyQuerySet)
def test_field(self):
em = EasyModel(django_databrowse.site, SomeModel)
self.assertIsInstance(em.field("some_field"), EasyField)
self.assertEqual(em.field("hello"), None)
def test_fields(self):
em = EasyModel(django_databrowse.site, SomeModel)
self.assertIsInstance(em.fields(), list)
def test_model_inheritance(self):
django_databrowse.site.register(SomeInheritedModel)
child = SomeInheritedModel.objects.create(some_field='hello',
special='world')
ei = EasyInstance(EasyModel(django_databrowse.site,
SomeModel), child)
ei_child = EasyInstance(EasyModel(django_databrowse.site,
SomeInheritedModel), child)
self.assertEqual(
next(ei.related_objects())['object_list'][0].instance,
ei_child.instance)
def test_model_inheritance_no_child(self):
instance = SomeModel.objects.create(some_field='hello')
ei = EasyInstance(EasyModel(django_databrowse.site, SomeModel),
instance)
self.assertEqual(list(ei.related_objects()), [])
|
class EasyModelTest(TestCase):
@classmethod
def setUpClass(self):
pass
@classmethod
def tearDownClass(self):
pass
def test_repr(self):
pass
def test_model_databrowse(self):
pass
def test_url(self):
pass
def test_manager(self):
pass
def test_field(self):
pass
def test_fields(self):
pass
def test_model_inheritance(self):
pass
def test_model_inheritance_no_child(self):
pass
| 13 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 8 | 7 | 0 | 8 | 0 | 10 | 10 | 53 | 10 | 43 | 24 | 30 | 0 | 35 | 22 | 24 | 1 | 1 | 0 | 10 |
4,172 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/__init__.py
|
django_databrowse.tests.SomeModel
|
class SomeModel(models.Model):
some_field = models.CharField(max_length=50)
def __unicode__(self):
return self.some_field
class Meta:
db_table = "somemodel"
|
class SomeModel(models.Model):
def __unicode__(self):
pass
class Meta:
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 8 | 2 | 6 | 5 | 3 | 0 | 6 | 5 | 3 | 1 | 1 | 0 | 1 |
4,173 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/datastructures.py
|
django_databrowse.datastructures.EasyInstance
|
class EasyInstance(object):
def __init__(self, easy_model, instance):
self.model, self.instance = easy_model, instance
def __repr__(self):
return smart_text(u'<EasyInstance for %s (%s)>' %
(self.model.model._meta.object_name,
self.instance._get_pk_val()))
def __str__(self):
val = smart_text(self.instance)
if len(val) > DISPLAY_SIZE:
return val[:DISPLAY_SIZE] + u'...'
return val
def pk(self):
return self.instance._get_pk_val()
def url(self):
return mark_safe('%s%s/%s/objects/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
iri_to_uri(self.pk())))
def fields(self):
"""
Generator that yields EasyInstanceFields for each field in this
EasyInstance's model.
"""
for f in self.model.model._meta.fields +\
self.model.model._meta.many_to_many:
yield EasyInstanceField(self.model, self, f)
def related_objects(self):
"""
Generator that yields dictionaries of all models that have this
EasyInstance's model as a ForeignKey or ManyToManyField, along with
lists of related objects.
"""
related_objects = [
f for f in self.model.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
]
related_m2m = [
f for f in self.model.model._meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
]
for rel_object in related_objects + related_m2m:
if rel_object.model not in self.model.model_list:
continue # Skip models that aren't in the model_list
em = EasyModel(self.model.site, rel_object.related_model)
try:
rel_accessor = getattr(self.instance, rel_object.get_accessor_name())
except ObjectDoesNotExist:
continue
if rel_object.field.rel.multiple:
object_list = [EasyInstance(em, i) for i in rel_accessor.all()]
else: # for one-to-one fields
object_list = [EasyInstance(em, rel_accessor)]
yield {
'model': em,
'related_field': rel_object.field.verbose_name,
'object_list': object_list,
}
|
class EasyInstance(object):
def __init__(self, easy_model, instance):
pass
def __repr__(self):
pass
def __str__(self):
pass
def pk(self):
pass
def url(self):
pass
def fields(self):
'''
Generator that yields EasyInstanceFields for each field in this
EasyInstance's model.
'''
pass
def related_objects(self):
'''
Generator that yields dictionaries of all models that have this
EasyInstance's model as a ForeignKey or ManyToManyField, along with
lists of related objects.
'''
pass
| 8 | 2 | 9 | 0 | 7 | 2 | 2 | 0.22 | 1 | 2 | 2 | 0 | 7 | 2 | 7 | 7 | 67 | 7 | 51 | 17 | 43 | 11 | 32 | 17 | 24 | 5 | 1 | 2 | 13 |
4,174 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/sites.py
|
django_databrowse.sites.ModelDatabrowse
|
class ModelDatabrowse(object):
plugins = {}
def __init__(self, model, site):
self.model = model
self.site = site
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'objects/3'.
"""
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.main_view(request)
try:
plugin_name, rest_of_url = url.split('/', 1)
except ValueError: # need more than 1 value to unpack
plugin_name, rest_of_url = url, None
try:
plugin = self.plugins[plugin_name]
except KeyError:
raise http.Http404('A plugin with the requested name '
'does not exist.')
return plugin.model_view(request, self, rest_of_url)
def main_view(self, request):
easy_model = EasyModel(self.site, self.model)
html_snippets = mark_safe(
u'\n'.join([p.model_index_html(
request,
self.model,
self.site
) for p in self.plugins.values()])
)
obj_list = easy_model.objects()
numitems = request.GET.get('items')
items_per_page = [25, 50, 100]
if numitems and numitems.isdigit() and int(numitems) > 0:
paginator = Paginator(obj_list, numitems)
else:
# fall back to default
paginator = Paginator(obj_list, items_per_page[0])
page = request.GET.get('page')
try:
obj_list_page = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
obj_list_page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page.
obj_list_page = paginator.page(paginator.num_pages)
return render(request,
'databrowse/model_detail.html',
{
'model': easy_model,
'root_url': self.site.root_url,
'plugin_html': html_snippets,
'object_list': obj_list_page,
'items_per_page': items_per_page,
}
)
|
class ModelDatabrowse(object):
def __init__(self, model, site):
pass
def root(self, request, url):
'''
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'objects/3'.
'''
pass
def main_view(self, request):
pass
| 4 | 1 | 20 | 1 | 16 | 3 | 3 | 0.18 | 1 | 4 | 1 | 1 | 3 | 2 | 3 | 3 | 64 | 5 | 51 | 17 | 47 | 9 | 34 | 17 | 30 | 4 | 1 | 1 | 9 |
4,175 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/test_sites.py
|
django_databrowse.tests.test_sites.DatabrowseTests
|
class DatabrowseTests(TestCase):
@classmethod
def tearDownClass(self):
django_databrowse.site.unregister(SomeModel)
def test_databrowse_register_unregister(self):
django_databrowse.site.register(SomeModel)
self.assertTrue(SomeModel in django_databrowse.site.registry)
django_databrowse.site.register(SomeOtherModel, YetAnotherModel)
self.assertTrue(SomeOtherModel in django_databrowse.site.registry)
self.assertTrue(YetAnotherModel in django_databrowse.site.registry)
self.assertRaisesMessage(
django_databrowse.sites.AlreadyRegistered,
'The model SomeModel is already registered',
django_databrowse.site.register, SomeModel, SomeOtherModel
)
django_databrowse.site.unregister(SomeOtherModel)
self.assertFalse(SomeOtherModel in django_databrowse.site.registry)
django_databrowse.site.unregister(SomeModel, YetAnotherModel)
self.assertFalse(SomeModel in django_databrowse.site.registry)
self.assertFalse(YetAnotherModel in django_databrowse.site.registry)
self.assertRaisesMessage(
django_databrowse.sites.NotRegistered,
'The model SomeModel is not registered',
django_databrowse.site.unregister, SomeModel, SomeOtherModel
)
self.assertRaisesMessage(
django_databrowse.sites.AlreadyRegistered,
'The model SomeModel is already registered',
django_databrowse.site.register, SomeModel, SomeModel
)
|
class DatabrowseTests(TestCase):
@classmethod
def tearDownClass(self):
pass
def test_databrowse_register_unregister(self):
pass
| 4 | 0 | 16 | 2 | 14 | 0 | 1 | 0 | 1 | 3 | 3 | 0 | 1 | 0 | 2 | 2 | 36 | 6 | 30 | 4 | 26 | 0 | 17 | 3 | 14 | 1 | 1 | 0 | 2 |
4,176 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/sites.py
|
django_databrowse.sites.DefaultModelDatabrowse
|
class DefaultModelDatabrowse(ModelDatabrowse):
plugins = {
'objects': ObjectDetailPlugin(),
'calendars': CalendarPlugin(),
'fields': FieldChoicePlugin()
}
|
class DefaultModelDatabrowse(ModelDatabrowse):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 6 | 0 | 6 | 2 | 5 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,177 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/sites.py
|
django_databrowse.sites.DatabrowseSite
|
class DatabrowseSite(object):
def __init__(self):
self.registry = {} # model_class -> databrowse_class
self.root_url = None
def register(self, *model_list, **options):
"""
Registers the given model(s) with the given databrowse site.
The model(s) should be Model classes, not instances.
If a databrowse class isn't given, it will use DefaultModelDatabrowse
(the default databrowse options).
If a model is already registered, this will raise AlreadyRegistered.
"""
databrowse_class = options.pop('databrowse_class',
DefaultModelDatabrowse)
for model in model_list:
if model in self.registry:
raise AlreadyRegistered('The model %s is already registered' %
model.__name__)
self.registry[model] = databrowse_class
def unregister(self, *model_list):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
for model in model_list:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' %
model.__name__)
del self.registry[model]
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'comments/comment/'.
"""
self.root_url = request.path[:len(request.path) - len(url)]
url = url.rstrip('/') # Trim trailing slash, if it exists.
if url == '':
return self.index(request)
elif '/' in url:
return self.model_page(request, *url.split('/', 2))
raise http.Http404('The requested databrowse page does not exist.')
def index(self, request):
m_list = [EasyModel(self, m) for m in self.registry.keys()]
return render(request,
'databrowse/homepage.html',
{'model_list': m_list, 'root_url': self.root_url},
)
def model_page(self, request, app_label, model_name, rest_of_url=None):
"""
Handles the model-specific functionality of the databrowse site,
delegating to the appropriate ModelDatabrowse class.
"""
try:
model = get_model(app_label, model_name)
except LookupError:
model = None
if model is None:
raise http.Http404("App %r, model %r, not found." %
(app_label, model_name))
try:
databrowse_class = self.registry[model]
except KeyError:
raise http.Http404("This model exists but has not been registered "
"with databrowse.")
return databrowse_class(model, self).root(request, rest_of_url)
|
class DatabrowseSite(object):
def __init__(self):
pass
def register(self, *model_list, **options):
'''
Registers the given model(s) with the given databrowse site.
The model(s) should be Model classes, not instances.
If a databrowse class isn't given, it will use DefaultModelDatabrowse
(the default databrowse options).
If a model is already registered, this will raise AlreadyRegistered.
'''
pass
def unregister(self, *model_list):
'''
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
'''
pass
def root(self, request, url):
'''
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'comments/comment/'.
'''
pass
def index(self, request):
pass
def model_page(self, request, app_label, model_name, rest_of_url=None):
'''
Handles the model-specific functionality of the databrowse site,
delegating to the appropriate ModelDatabrowse class.
'''
pass
| 7 | 4 | 12 | 1 | 8 | 4 | 3 | 0.46 | 1 | 6 | 4 | 0 | 6 | 2 | 6 | 6 | 78 | 13 | 46 | 15 | 39 | 21 | 37 | 15 | 30 | 4 | 1 | 2 | 15 |
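A sketch of how the site registry above is used. It assumes a configured Django project; `Author` and `myapp` are hypothetical names. The exception classes are the ones the code and tests in this dump reference:

import django_databrowse
from django_databrowse.sites import AlreadyRegistered, NotRegistered
from myapp.models import Author   # hypothetical model

django_databrowse.site.register(Author)
try:
    django_databrowse.site.register(Author)   # duplicate registration
except AlreadyRegistered:
    pass
django_databrowse.site.unregister(Author)

# In a URLconf, root() expects everything after the prefix as its second
# argument, e.g. a pattern like r'^databrowse/(.*)$' routed to
# django_databrowse.site.root.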
4,178 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/objects.py
|
django_databrowse.plugins.objects.ObjectDetailPlugin
|
class ObjectDetailPlugin(DatabrowsePlugin):
def model_view(self, request, model_databrowse, url):
# If the object ID wasn't provided, redirect to the model page,
# which is one level up.
if url is None:
return http.HttpResponseRedirect(
urlparse.urljoin(request.path, '../')
)
easy_model = EasyModel(
model_databrowse.site,
model_databrowse.model
)
try:
obj = easy_model.object_by_pk(url)
except ObjectDoesNotExist:
raise http.Http404('Id not found')
except ValueError:
raise http.Http404('Invalid format key provided')
return render(request,
'databrowse/object_detail.html',
{
'object': obj,
'root_url': model_databrowse.site.root_url
}
)
|
class ObjectDetailPlugin(DatabrowsePlugin):
def model_view(self, request, model_databrowse, url):
pass
| 2 | 0 | 24 | 0 | 22 | 2 | 4 | 0.09 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 4 | 25 | 0 | 23 | 4 | 21 | 2 | 12 | 4 | 10 | 4 | 2 | 1 | 4 |
4,179 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/__init__.py
|
django_databrowse.tests.SomeInheritedModel
|
class SomeInheritedModel(SomeModel):
special = models.CharField(max_length=30)
|
class SomeInheritedModel(SomeModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,180 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/datastructures.py
|
django_databrowse.datastructures.EasyModel
|
class EasyModel(object):
def __init__(self, site, model):
self.site = site
self.model = model
self.model_list = site.registry.keys()
self.verbose_name = model._meta.verbose_name
self.verbose_name_plural = model._meta.verbose_name_plural
def __repr__(self):
return '<EasyModel for %s>' % \
smart_text(self.model._meta.object_name)
def model_databrowse(self):
"Returns the ModelDatabrowse class for this model."
return self.site.registry[self.model]
def url(self):
return mark_safe('%s%s/%s/' % (self.site.root_url,
self.model._meta.app_label,
self.model._meta.model_name))
def objects(self, **kwargs):
return self.get_query_set().filter(**kwargs)
def get_query_set(self):
qs = self.model._default_manager.get_queryset()
easy_qs = EasyQuerySet(model=qs.model, query=qs.query.clone(),
using=qs._db, hints=qs._hints)
easy_qs._easymodel = self
return easy_qs
def object_by_pk(self, pk):
return EasyInstance(self, self.model._default_manager.get(pk=pk))
def sample_objects(self):
for obj in self.model._default_manager.all()[:3]:
yield EasyInstance(self, obj)
def field(self, name):
try:
f = self.model._meta.get_field(name)
except models.FieldDoesNotExist:
return None
return EasyField(self, f)
def fields(self):
return [EasyField(self, f) for f in (self.model._meta.fields +
self.model._meta.many_to_many)]
|
class EasyModel(object):
def __init__(self, site, model):
pass
def __repr__(self):
pass
def model_databrowse(self):
'''Returns the ModelDatabrowse class for this model.'''
pass
def url(self):
pass
def objects(self, **kwargs):
pass
def get_query_set(self):
pass
def object_by_pk(self, pk):
pass
def sample_objects(self):
pass
def field(self, name):
pass
def fields(self):
pass
| 11 | 1 | 4 | 0 | 4 | 0 | 1 | 0.03 | 1 | 3 | 3 | 0 | 10 | 5 | 10 | 10 | 48 | 9 | 38 | 20 | 27 | 1 | 33 | 20 | 22 | 2 | 1 | 1 | 12 |
4,181 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/datastructures.py
|
django_databrowse.datastructures.EasyQuerySet
|
class EasyQuerySet(QuerySet):
"""
When creating (or cloning to) an `EasyQuerySet`, make sure to set the
`_easymodel` variable to the related `EasyModel`.
"""
def iterator(self, *args, **kwargs):
for obj in super(EasyQuerySet, self).iterator(*args, **kwargs):
yield EasyInstance(self._easymodel, obj)
def _clone(self, *args, **kwargs):
c = super(EasyQuerySet, self)._clone(*args, **kwargs)
c._easymodel = self._easymodel
return c
|
class EasyQuerySet(QuerySet):
'''
When creating (or cloning to) an `EasyQuerySet`, make sure to set the
`_easymodel` variable to the related `EasyModel`.
'''
def iterator(self, *args, **kwargs):
pass
def _clone(self, *args, **kwargs):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.5 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 2 | 13 | 1 | 8 | 5 | 5 | 4 | 8 | 5 | 5 | 2 | 1 | 1 | 3 |
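The point of `_clone()` above is that Django copies querysets on every refinement, so any custom attribute must be carried across the copy or it is silently lost. The same pattern shown on a plain Python class, so the sketch runs without a Django project:

class TaggedList(list):
    """A list-like object whose custom attribute survives 'cloning'."""
    _tag = None

    def _clone(self):
        c = TaggedList(self)
        c._tag = self._tag        # propagate the custom attribute
        return c

    def filtered(self, pred):
        c = self._clone()         # mimics QuerySet.filter() cloning
        c[:] = [x for x in c if pred(x)]
        return c

tl = TaggedList([1, 2, 3])
tl._tag = 'demo'
assert tl.filtered(lambda x: x > 1)._tag == 'demo'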
4,182 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/fieldchoices.py
|
django_databrowse.plugins.fieldchoices.FieldChoicePlugin
|
class FieldChoicePlugin(DatabrowsePlugin):
def __init__(self, field_filter=None):
# If field_filter is given, it should be a callable that takes a
# Django database Field instance and returns True if that field
# should be included. If field_filter is None, all fields will
# be used.
self.field_filter = field_filter
def field_dict(self, model):
"""
Helper function that returns a dictionary of all fields in the given
model. If self.field_filter is set, it only includes the fields that
match the filter.
"""
if self.field_filter:
return dict(
[(f.name, f) for f in model._meta.fields
if self.field_filter(f)]
)
else:
return dict(
[(f.name, f) for f in model._meta.fields
if not f.rel and
not f.primary_key and
not f.unique and
not isinstance(f, (models.AutoField, models.TextField))]
)
def model_index_html(self, request, model, site):
fields = self.field_dict(model)
if not fields:
return u''
return mark_safe(
u'<p class="filter"><strong>View by:</strong> %s</p>' % \
u', '.join(
['<a href="fields/%s/">%s</a>' %
(f.name, force_text(capfirst(f.verbose_name)))
for f in fields.values()])
)
def urls(self, plugin_name, easy_instance_field):
if easy_instance_field.field \
in self.field_dict(easy_instance_field.model.model).values():
field_value = smart_str(easy_instance_field.raw_value)
return [mark_safe(u'%s%s/%s/%s/' % (
easy_instance_field.model.url(),
plugin_name, easy_instance_field.field.name,
quote(field_value, safe='')))]
def model_view(self, request, model_databrowse, url):
self.model, self.site = model_databrowse.model, model_databrowse.site
self.fields = self.field_dict(self.model)
# If the model has no fields with choices, there's no point in going
# further.
if not self.fields:
raise http.Http404('The requested model has no fields.')
if url is None:
return self.homepage_view(request)
url_bits = url.split('/', 1)
if url_bits[0] in self.fields:
return self.field_view(
request,
self.fields[url_bits[0]],
*url_bits[1:]
)
raise http.Http404('The requested page does not exist.')
def homepage_view(self, request):
easy_model = EasyModel(self.site, self.model)
field_list = list(self.fields.values())
field_list.sort(key=lambda k: k.verbose_name)
return render(request,
'databrowse/fieldchoice_homepage.html',
{
'root_url': self.site.root_url,
'model': easy_model,
'field_list': field_list
}
)
def field_view(self, request, field, value=None):
easy_model = EasyModel(self.site, self.model)
easy_field = easy_model.field(field.name)
if value is not None:
obj_list = easy_model.objects(**{field.name: value})
else:
obj_list = [v[field.name] for v in \
self.model._default_manager.distinct().order_by(field.name).\
values(field.name)]
# add paging
numitems = request.GET.get('items')
items_per_page = [25,50,100]
if numitems and numitems.isdigit() and int(numitems)>0:
paginator = Paginator(obj_list, numitems)
else:
# fall back to default
paginator = Paginator(obj_list, items_per_page[0])
page = request.GET.get('page')
try:
obj_list_page = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
obj_list_page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page.
obj_list_page = paginator.page(paginator.num_pages)
if value is not None:
return render(request,
'databrowse/fieldchoice_detail.html',
{
'root_url': self.site.root_url,
'model': easy_model,
'field': easy_field,
'value': value,
'object_list': obj_list_page,
'items_per_page': items_per_page,
}
)
return render(request,
'databrowse/fieldchoice_list.html',
{
'root_url': self.site.root_url,
'model': easy_model,
'field': easy_field,
'object_list': obj_list_page,
'items_per_page': items_per_page,
}
)
|
class FieldChoicePlugin(DatabrowsePlugin):
def __init__(self, field_filter=None):
pass
def field_dict(self, model):
'''
Helper function that returns a dictionary of all fields in the given
model. If self.field_filter is set, it only includes the fields that
match the filter.
'''
pass
def model_index_html(self, request, model, site):
pass
def urls(self, plugin_name, easy_instance_field):
pass
def model_view(self, request, model_databrowse, url):
pass
def homepage_view(self, request):
pass
def field_view(self, request, field, value=None):
pass
| 8 | 1 | 18 | 1 | 15 | 2 | 3 | 0.14 | 1 | 4 | 1 | 0 | 7 | 4 | 7 | 10 | 135 | 13 | 107 | 24 | 99 | 15 | 53 | 24 | 45 | 6 | 2 | 1 | 18 |
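Both `ModelDatabrowse.main_view()` and `FieldChoicePlugin.field_view()` above repeat the same paging fallback. Extracted as a standalone helper it would look like this; a sketch only, requiring Django installed (Paginator works on plain lists without project settings):

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

DEFAULT_PER_PAGE = 25

def page_of(object_list, page, numitems=None):
    """Return the requested page, falling back gracefully on bad input."""
    if numitems and str(numitems).isdigit() and int(numitems) > 0:
        paginator = Paginator(object_list, numitems)
    else:
        paginator = Paginator(object_list, DEFAULT_PER_PAGE)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)                    # non-integer -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # out of range -> last page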
4,183 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/calendars.py
|
django_databrowse.plugins.calendars.YearView
|
class YearView(DateViewMixin, dates.YearArchiveView):
template_name = 'databrowse/calendar_year.html'
|
class YearView(DateViewMixin, dates.YearArchiveView):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,184 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/calendars.py
|
django_databrowse.plugins.calendars.MonthView
|
class MonthView(DateViewMixin, dates.MonthArchiveView):
template_name = 'databrowse/calendar_month.html'
|
class MonthView(DateViewMixin, dates.MonthArchiveView):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,185 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/calendars.py
|
django_databrowse.plugins.calendars.CalendarPlugin
|
class CalendarPlugin(DatabrowsePlugin):
def __init__(self, field_names=None):
self.field_names = field_names
def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields
if isinstance(f, models.DateField)])
else:
return dict([(f.name, f)
for f in model._meta.fields
if isinstance(f, models.DateField) and
(f.name in self.field_names)])
def model_index_html(self, request, model, site):
fields = self.field_dict(model)
if not fields:
return u''
return mark_safe(
u'<p class="filter"><strong>View calendar by:</strong> %s</p>' % \
u', '.join(
['<a href="calendars/%s/">%s</a>' %
(f.name,force_text(capfirst(f.verbose_name)))
for f in fields.values()])
)
def urls(self, plugin_name, easy_instance_field):
if isinstance(easy_instance_field.field, models.DateField):
d = easy_instance_field.raw_value
return [mark_safe(u'%s%s/%s/%s/%s/%s/' % (
easy_instance_field.model.url(),
plugin_name, easy_instance_field.field.name,
str(d.year),
datetime_safe.new_date(d).strftime('%b').lower(),
d.day))]
def model_view(self, request, model_databrowse, url):
self.model, self.site = model_databrowse.model, model_databrowse.site
self.fields = self.field_dict(self.model)
# If the model has no DateFields, there's no point in going further.
if not self.fields:
raise http.Http404('The requested model has no calendars.')
if url is None:
return self.homepage_view(request)
url_bits = url.split('/')
if url_bits[0] in self.fields:
return self.calendar_view(
request,
self.fields[url_bits[0]],
*url_bits[1:]
)
raise http.Http404('The requested page does not exist.')
def homepage_view(self, request):
easy_model = EasyModel(self.site, self.model)
field_list = list(self.fields.values())
field_list.sort(key=lambda k: k.verbose_name)
return render(request,
'databrowse/calendar_homepage.html',
{
'root_url': self.site.root_url,
'model': easy_model,
'field_list': field_list
}
)
def calendar_view(self, request, field, year=None, month=None, day=None):
easy_model = EasyModel(self.site, self.model)
root_url = self.site.root_url
if day is not None:
return DayView.as_view(
year=year, month=month, day=day,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
elif month is not None:
return MonthView.as_view(
year=year, month=month,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
elif year is not None:
return YearView.as_view(
year=year,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
else:
return IndexView.as_view(
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
assert False, ('%s, %s, %s, %s' % (field, year, month, day))
|
class CalendarPlugin(DatabrowsePlugin):
def __init__(self, field_names=None):
pass
def field_dict(self, model):
'''
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary.
'''
pass
def model_index_html(self, request, model, site):
pass
def urls(self, plugin_name, easy_instance_field):
pass
def model_view(self, request, model_databrowse, url):
pass
def homepage_view(self, request):
pass
def calendar_view(self, request, field, year=None, month=None, day=None):
pass
| 8 | 1 | 15 | 1 | 14 | 1 | 2 | 0.06 | 1 | 7 | 5 | 0 | 7 | 4 | 7 | 10 | 115 | 11 | 98 | 18 | 90 | 6 | 41 | 18 | 33 | 4 | 2 | 1 | 16 |
4,186 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/plugins/calendars.py
|
django_databrowse.plugins.calendars.DateViewMixin
|
class DateViewMixin(object):
allow_empty = False
allow_future = True
root_url = None
model = None
field = None
def get_context_data(self, **kwargs):
context = super(DateViewMixin, self).get_context_data(**kwargs)
context.update({
'root_url': self.root_url,
'model': self.model,
'field': self.field
})
return context
|
class DateViewMixin(object):
def get_context_data(self, **kwargs):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 0 | 4 | 1 | 0 | 1 | 1 | 15 | 1 | 14 | 8 | 12 | 0 | 10 | 8 | 8 | 1 | 1 | 0 | 1 |
4,187 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/datastructures.py
|
django_databrowse.datastructures.EasyInstanceField
|
class EasyInstanceField(object):
def __init__(self, easy_model, instance, field):
self.model, self.field, self.instance = easy_model, field, instance
self.raw_value = getattr(instance.instance, field.name)
def __repr__(self):
return smart_text(u'<EasyInstanceField for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def values(self):
"""
Returns a list of values for this field for this instance. It's a list
so we can accommodate many-to-many fields.
"""
# This import is deliberately inside the function because it causes
# some settings to be imported, and we don't want to do that at the
# module level.
if self.field.rel:
if isinstance(self.field.rel, models.ManyToOneRel):
objs = getattr(self.instance.instance, self.field.name)
elif isinstance(self.field.rel,
models.ManyToManyRel): # ManyToManyRel
return list(getattr(self.instance.instance,
self.field.name).all())
elif self.field.choices:
objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
elif isinstance(self.field, models.DateField) or \
isinstance(self.field, models.TimeField):
if self.raw_value:
if isinstance(self.field, models.DateTimeField):
objs = capfirst(formats.date_format(self.raw_value,
'DATETIME_FORMAT'))
elif isinstance(self.field, models.TimeField):
objs = capfirst(formats.time_format(self.raw_value,
'TIME_FORMAT'))
else:
objs = capfirst(formats.date_format(self.raw_value,
'DATE_FORMAT'))
else:
objs = EMPTY_VALUE
elif isinstance(self.field, models.BooleanField) or \
isinstance(self.field, models.NullBooleanField):
objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
else:
objs = self.raw_value
return [objs]
def urls(self):
"Returns a list of (value, URL) tuples."
# First, check the urls() method for each plugin.
plugin_urls = []
for plugin_name, plugin in \
self.model.model_databrowse().plugins.items():
urls = plugin.urls(plugin_name, self)
if urls is not None:
return zip(self.values(), urls)
if self.field.rel:
m = EasyModel(self.model.site, self.field.rel.to)
if self.field.rel.to in self.model.model_list:
lst = []
for value in self.values():
if value is None:
continue
url = mark_safe('%s%s/%s/objects/%s/' %
(self.model.site.root_url,
m.model._meta.app_label,
m.model._meta.model_name,
iri_to_uri(value._get_pk_val())))
lst.append((smart_text(value), url))
else:
lst = [(value, None) for value in self.values()]
elif self.field.choices:
lst = []
for value in self.values():
url = mark_safe('%s%s/%s/fields/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.name,
iri_to_uri(self.raw_value)))
lst.append((value, url))
elif isinstance(self.field, models.URLField):
val = self.values()[0]
lst = [(val, iri_to_uri(val))]
else:
lst = [(self.values()[0], None)]
return lst
|
class EasyInstanceField(object):
def __init__(self, easy_model, instance, field):
pass
def __repr__(self):
pass
def values(self):
'''
Returns a list of values for this field for this instance. It's a list
so we can accommodate many-to-many fields.
'''
pass
def urls(self):
'''Returns a list of (value, URL) tuples.'''
pass
| 5 | 2 | 22 | 0 | 19 | 3 | 6 | 0.14 | 1 | 4 | 1 | 0 | 4 | 4 | 4 | 4 | 90 | 3 | 77 | 17 | 72 | 11 | 46 | 17 | 41 | 10 | 1 | 4 | 22 |
4,188 |
Alir3z4/django-databrowse
|
Alir3z4_django-databrowse/django_databrowse/tests/test_sites.py
|
django_databrowse.tests.test_sites.DatabrowseTestsClient
|
class DatabrowseTestsClient(TestCase):
"""
Test the behavior of databrowse with a Client
"""
@classmethod
def tearDownClass(self):
django_databrowse.site.unregister(SomeModel)
def test_urls(self):
django_databrowse.site.register(SomeModel)
response = Client().get('')
self.assertEqual(response.status_code, 200)
self.assertIsInstance(
response.context['model_list'][0],
EasyModel)
response = Client().get('/django_databrowse/somemodel/')
self.assertEqual(response.status_code, 200)
response = Client().get('/django_databrowse/doesnotexistmodel/')
self.assertEqual(response.status_code, 404)
response = Client().get('/django_databrowse/something/somemodel/')
self.assertEqual(response.status_code, 404)
response = Client().get(
'/django_databrowse/somemodel/fields/some_field/')
self.assertEqual(response.status_code, 200)
response = Client().get(
'/django_databrowse/someothermodel/')
self.assertEqual(response.status_code, 404)
|
class DatabrowseTestsClient(TestCase):
'''
Test the behavior of databrowse with a Client
'''
@classmethod
def tearDownClass(self):
pass
def test_urls(self):
pass
| 4 | 1 | 12 | 1 | 11 | 0 | 1 | 0.13 | 1 | 2 | 2 | 0 | 1 | 0 | 2 | 2 | 29 | 3 | 23 | 5 | 19 | 3 | 18 | 4 | 15 | 1 | 1 | 0 | 2 |
4,189 |
Alir3z4/python-currencies
|
Alir3z4_python-currencies/currencies/exceptions.py
|
currencies.exceptions.CurrencyDoesNotExist
|
class CurrencyDoesNotExist(Exception):
"""
The requested Currency does not exist
"""
pass
|
class CurrencyDoesNotExist(Exception):
'''
The requested Currency does not exist
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 5 | 0 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
4,190 |
Alir3z4/python-currencies
|
Alir3z4_python-currencies/currencies/__init__.py
|
currencies.Currency
|
class Currency:
money_currency: Optional[str] = None
money_formats = MONEY_FORMATS
def __init__(self, money_currency: str):
self.set_money_currency(money_currency)
def set_money_currency(self, money_currency: str) -> None:
if money_currency not in self.money_formats:
raise CurrencyDoesNotExist
self.money_currency = money_currency
def get_money_currency(self) -> str:
return self.money_currency
@classmethod
def get_currency_formats(cls) -> list:
return list(cls.money_formats.keys())
def get_money_format(self, amount: Union[int, float, str]) -> str:
"""
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_format(13)
>>> '$13'
>>> currency.get_money_format(13.99)
>>> '$13.99'
>>> currency.get_money_format('13,2313,33')
>>> '$13,2313,33'
"""
return self.money_formats[
self.get_money_currency()
]['money_format'].format(amount=amount)
def get_money_with_currency_format(self, amount: Union[int, float, str]) -> str:
"""
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_with_currency_format(13)
>>> '$13 USD'
>>> currency.get_money_with_currency_format(13.99)
>>> '$13.99 USD'
>>> currency.get_money_with_currency_format('13,2313,33')
>>> '$13,2313,33 USD'
"""
return self.money_formats[
self.get_money_currency()
]['money_with_currency_format'].format(amount=amount)
|
class Currency:
def __init__(self, money_currency: str):
pass
def set_money_currency(self, money_currency: str) -> None:
pass
def get_money_currency(self) -> str:
pass
@classmethod
def get_currency_formats(cls) -> list:
pass
def get_money_format(self, amount: Union[int, float, str]) -> str:
'''
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_format(13)
>>> '$13'
>>> currency.get_money_format(13.99)
>>> '$13.99'
>>> currency.get_money_format('13,2313,33')
>>> '$13,2313,33'
'''
pass
def get_money_with_currency_format(self, amount: Union[int, float, str]) -> str:
'''
Usage:
>>> currency = Currency('USD')
>>> currency.get_money_with_currency_format(13)
>>> '$13 USD'
>>> currency.get_money_with_currency_format(13.99)
>>> '$13.99 USD'
>>> currency.get_money_with_currency_format('13,2313,33')
>>> '$13,2313,33 USD'
'''
pass
| 8 | 2 | 7 | 0 | 3 | 3 | 1 | 0.91 | 0 | 5 | 1 | 0 | 5 | 0 | 6 | 6 | 49 | 7 | 22 | 10 | 14 | 20 | 17 | 9 | 10 | 2 | 0 | 1 | 7 |
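A short usage sketch combining the pieces above; the behaviour matches the test suite later in this dump (USD/AED formats, CurrencyDoesNotExist for unknown codes):

from currencies import Currency
from currencies.exceptions import CurrencyDoesNotExist

currency = Currency('USD')
print(currency.get_money_format(13.99))                 # $13.99
print(currency.get_money_with_currency_format(13.99))   # $13.99 USD

currency.set_money_currency('AED')
print(currency.get_money_format(13))                    # Dhs. 13

print('USD' in Currency.get_currency_formats())         # True
try:
    Currency('BingoMingo')
except CurrencyDoesNotExist:
    print('unknown currency code')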
4,191 |
Alir3z4/python-currencies
|
Alir3z4_python-currencies/setup.py
|
setup.RunTests
|
class RunTests(Command):
description = "run all tests for python-currencies"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
tests = unittest.TestLoader().discover('.')
runner = unittest.TextTestRunner()
results = runner.run(tests)
sys.exit(not results.wasSuccessful())
|
class RunTests(Command):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 42 | 15 | 3 | 12 | 9 | 8 | 0 | 12 | 9 | 8 | 1 | 2 | 0 | 3 |
4,192 |
Alir3z4/python-currencies
|
Alir3z4_python-currencies/tests/test_currency.py
|
tests.test_currency.TestCurrency
|
class TestCurrency(TestCase):
def test_get_version(self):
version = get_version()
self.assertIsInstance(version, str)
self.assertEqual(len(version.split('.')), len(__VERSION__))
def test_get_money_currency(self):
currency = Currency('USD')
self.assertIsInstance(currency.get_money_currency(), str)
self.assertEqual(currency.get_money_currency(), 'USD')
def test_set_money_currency(self):
currency = Currency('USD')
self.assertEqual(currency.get_money_currency(), 'USD')
self.assertEqual(currency.get_money_format(13), '$13')
currency.set_money_currency('AED')
self.assertEqual(currency.get_money_currency(), 'AED')
self.assertEqual(currency.get_money_format(13), 'Dhs. 13')
def test_get_currency_formats(self):
currency_formats = Currency.get_currency_formats()
self.assertIsNotNone(currency_formats)
self.assertIsInstance(currency_formats, list)
self.assertGreater(len(currency_formats), 0)
def test_get_money_format(self):
currency = Currency('USD')
self.assertEqual(currency.get_money_format(13), '$13')
self.assertEqual(currency.get_money_format(13.99), '$13.99')
self.assertEqual(
currency.get_money_format('13,2313,33'),
'$13,2313,33'
)
def test_get_money_with_currency_format(self):
currency = Currency('USD')
self.assertEqual(currency.get_money_with_currency_format(13.99), '$13.99 USD')
self.assertEqual(
currency.get_money_with_currency_format('13,2313,33'),
'$13,2313,33 USD'
)
def test_does_not_exist_currency(self):
self.assertRaises(
CurrencyDoesNotExist,
Currency,
money_currency='BingoMingo'
)
|
class TestCurrency(TestCase):
def test_get_version(self):
pass
def test_get_money_currency(self):
pass
def test_set_money_currency(self):
pass
def test_get_currency_formats(self):
pass
def test_get_money_format(self):
pass
def test_get_money_with_currency_format(self):
pass
def test_does_not_exist_currency(self):
pass
| 8 | 0 | 7 | 1 | 6 | 0 | 1 | 0 | 1 | 4 | 2 | 0 | 7 | 0 | 7 | 79 | 56 | 14 | 42 | 14 | 34 | 0 | 32 | 14 | 24 | 1 | 2 | 0 | 7 |
4,193 |
Alir3z4/python-gignore
|
Alir3z4_python-gignore/gignore/__init__.py
|
gignore.Gignore
|
class Gignore(object):
BASE_URL = 'https://raw.githubusercontent.com/github/gitignore/master/'
name = None
file_content = None
valid = True
errors = []
def __init__(self, name):
self.set_name(name)
self.clean_name()
def get_base_url(self):
"""
:rtype: str
"""
return self.BASE_URL
def set_name(self, name):
"""
:type name: str
"""
self.name = name
def get_name(self):
"""
:rtype: str
"""
return self.name
def set_file_content(self, file_content):
"""
:type file_content: str
"""
self.file_content = file_content
def get_file_content(self):
"""
:rtype: str
"""
return self.file_content
def is_valid(self):
"""
:rtype: bool
"""
return self.valid
def set_valid(self, valid):
"""
:type valid: bool
"""
self.valid = valid
def add_error(self, error_message):
"""
:type error_message: str
"""
self.errors.append(error_message)
def get_errors(self):
"""
:rtype: list of str
"""
return self.errors
def get_gitignore_file(self):
try:
resp = urlopen('{0}{1}.gitignore'.format(self.get_base_url(),
self.get_name()))
self.set_file_content(resp.read())
except HTTPError as exc:
self.add_error("{0}:{1}".format(exc.code, exc.read()))
self.set_valid(False)
def clean_name(self):
name = self.get_name()
if name.endswith('.gitignore'):
self.set_name(name.replace('.gitignore', ''))
|
class Gignore(object):
def __init__(self, name):
pass
def get_base_url(self):
'''
:rtype: str
'''
pass
def set_name(self, name):
'''
:type name: str
'''
pass
def get_name(self):
'''
:rtype: str
'''
pass
def set_file_content(self, file_content):
'''
:type file_content: str
'''
pass
def get_file_content(self):
'''
:rtype: str
'''
pass
def is_valid(self):
'''
:rtype: bool
'''
pass
def set_valid(self, valid):
'''
:type valid: bool
'''
pass
def add_error(self, error_message):
'''
:type error_message: str
'''
pass
def get_errors(self):
'''
:rtype: list of str
'''
pass
def get_gitignore_file(self):
pass
def clean_name(self):
pass
| 13 | 9 | 5 | 0 | 3 | 2 | 1 | 0.69 | 1 | 1 | 0 | 0 | 12 | 0 | 12 | 12 | 80 | 14 | 39 | 21 | 26 | 27 | 38 | 20 | 25 | 2 | 1 | 1 | 14 |
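A usage sketch for the class above; it performs a real HTTP request against the GitHub gitignore repository, so it needs network access:

from gignore import Gignore

g = Gignore('Python.gitignore')   # clean_name() strips the extension
g.get_gitignore_file()            # fetches <BASE_URL>Python.gitignore
if g.is_valid():
    print(g.get_file_content())
else:
    print(g.get_errors())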
4,194 |
Alir3z4/python-stop-words
|
Alir3z4_python-stop-words/stop_words/__init__.py
|
stop_words.StopWordError
|
class StopWordError(Exception):
pass
|
class StopWordError(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
4,195 |
Alir3z4/python-stop-words
|
Alir3z4_python-stop-words/stop_words/tests.py
|
stop_words.tests.StopWordsTestCase
|
class StopWordsTestCase(TestCase):
number_of_english_stop_words = 174
def test_get_stop_words(self):
sw = get_stop_words('english')
self.assertEqual(len(sw), self.number_of_english_stop_words)
def test_get_stop_words_language_mapping(self):
sw = get_stop_words('en')
self.assertEqual(len(sw), self.number_of_english_stop_words)
self.assertEqual(sw, get_stop_words('english'))
def test_get_stop_words_cache(self):
self.assertFalse('french' in stop_words.STOP_WORDS_CACHE)
sw = get_stop_words('fr')
self.assertTrue('french' in stop_words.STOP_WORDS_CACHE)
original_stop_words_dir = stop_words.STOP_WORDS_DIR
stop_words.STOP_WORDS_DIR = 'not-existing-directory'
self.assertEqual(sw, get_stop_words('french'))
stop_words.STOP_WORDS_DIR = original_stop_words_dir
try:
get_stop_words('klingon')
except:
pass
self.assertFalse('klingon' in stop_words.STOP_WORDS_CACHE)
def test_get_stop_words_unavailable_language(self):
self.assertRaises(StopWordError, get_stop_words, 'sindarin')
def test_get_stop_words_install_issue(self):
original_stop_words_dir = stop_words.STOP_WORDS_DIR
stop_words.STOP_WORDS_DIR = 'not-existing-directory'
self.assertRaises(StopWordError, get_stop_words, 'german')
stop_words.STOP_WORDS_DIR = original_stop_words_dir
def test_safe_get_stop_words(self):
self.assertRaises(StopWordError, get_stop_words, 'huttese')
self.assertEqual(safe_get_stop_words('huttese'), [])
def test_random_language_stop_words_load(self):
languages = list(LANGUAGE_MAPPING.keys()) + list(AVAILABLE_LANGUAGES)
sample = random.sample(languages, len(languages))
for language in sample:
stop_words = safe_get_stop_words(language)
self.assertTrue(
len(stop_words) > 0,
'Cannot load stopwords for {0} language'.format(language)
)
def test_filters(self):
language = 'en'
before = get_stop_words(language, False)
letter = random.choice(random.choice(before))
def remove_letter(stopwords, language):
return [word for word in stopwords if letter not in word]
stop_words.add_filter(remove_letter)
after = get_stop_words(language, False)
for stopword in after:
self.assertFalse(letter in stopword)
self.assertTrue(stop_words.remove_filter(remove_letter))
|
class StopWordsTestCase(TestCase):
def test_get_stop_words(self):
pass
def test_get_stop_words_language_mapping(self):
pass
def test_get_stop_words_cache(self):
pass
def test_get_stop_words_unavailable_language(self):
pass
def test_get_stop_words_install_issue(self):
pass
def test_safe_get_stop_words(self):
pass
def test_random_language_stop_words_load(self):
pass
def test_filters(self):
pass
def remove_letter(stopwords, language):
pass
| 10 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 8 | 0 | 8 | 80 | 61 | 9 | 52 | 25 | 42 | 0 | 49 | 25 | 39 | 2 | 2 | 1 | 12 |
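A sketch of the filter API the test above exercises. The second argument to get_stop_words appears to bypass the module cache; that is an inference from test_filters, since a cached list would not reflect a newly added filter:

import stop_words
from stop_words import get_stop_words, safe_get_stop_words

def drop_one_letter_words(words, language):
    # Filters receive (stopwords, language) and return a new list.
    return [w for w in words if len(w) > 1]

stop_words.add_filter(drop_one_letter_words)
filtered = get_stop_words('en', False)    # False: skip the cache (assumption)
stop_words.remove_filter(drop_one_letter_words)

print(safe_get_stop_words('huttese'))     # unknown language -> []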
4,196 |
AllTheWayDown/turgles
|
AllTheWayDown_turgles/turgles/benchmarks/object_array_access.py
|
object_array_access.TurtleView
|
class TurtleView():
"""A view of a single turtle in the massive list of turtles."""
__slots__ = ('_turtles', 'X', 'Y', 'ANGLE', 'SIZE')
def __init__(self, turtles, num):
self._turtles = turtles
self.X, self.Y, self.ANGLE, self.SIZE = (num + i for i in range(4))
def getx(self):
return self._turtles[self.X]
def setx(self, x):
self._turtles[self.X] = x
x = property(getx, setx)
def gety(self):
return self._turtles[self.Y]
def sety(self, y):
self._turtles[self.Y] = y
y = property(gety, sety)
def move(self, dx, dy):
self.x += dx
self.y += dy
|
class TurtleView():
'''A view of a single turtle in the massive list of turtles.'''
def __init__(self, turtles, num):
pass
def getx(self):
pass
def setx(self, x):
pass
def gety(self):
pass
def sety(self, y):
pass
def move(self, dx, dy):
pass
| 7 | 1 | 2 | 0 | 2 | 0 | 1 | 0.06 | 0 | 1 | 0 | 0 | 6 | 5 | 6 | 6 | 23 | 4 | 18 | 12 | 11 | 1 | 18 | 12 | 11 | 1 | 0 | 0 | 6 |
4,197 |
AllTheWayDown/turgles
|
AllTheWayDown_turgles/turgles/benchmarks/object_array_access.py
|
object_array_access.TurtleView2
|
class TurtleView2():
"""A view of a single turtle in the massive list of turtles."""
__slots__ = ('X', 'Y', 'ANGLE', 'SIZE')
def __init__(self, _, num):
self.X, self.Y, self.ANGLE, self.SIZE = (num + i for i in range(4))
def getx(self):
return TURTLES[self.X]
def setx(self, x):
TURTLES[self.X] = x
x = property(getx, setx)
def gety(self):
return TURTLES[self.Y]
def sety(self, y):
TURTLES[self.Y] = y
y = property(gety, sety)
def move(self, dx, dy):
self.x += dx
self.y += dy
|
class TurtleView2():
'''A view of a single turtle in the massive list of turtles.'''
def __init__(self, _, num):
pass
def getx(self):
pass
def setx(self, x):
pass
def gety(self):
pass
def sety(self, y):
pass
def move(self, dx, dy):
pass
| 7 | 1 | 2 | 0 | 2 | 0 | 1 | 0.06 | 0 | 1 | 0 | 0 | 6 | 4 | 6 | 6 | 23 | 5 | 17 | 11 | 10 | 1 | 17 | 11 | 10 | 1 | 0 | 0 | 6 |
4,198 |
AllTheWayDown/turgles
|
AllTheWayDown_turgles/turgles/benchmarks/object_array_access.py
|
object_array_access.TurtleView3
|
class TurtleView3():
"""A view of a single turtle in the massive list of turtles."""
__slots__ = ('view')
def __init__(self, turtles, num):
self.view = memoryview(turtles)[num:num + 4]
def getx(self):
return self.view[0]
def setx(self, x):
self.view[0] = x
x = property(getx, setx)
def gety(self):
return self.view[1]
def sety(self, y):
self.view[1] = y
y = property(gety, sety)
def move(self, dx, dy):
self.x += dx
self.y += dy
|
class TurtleView3():
'''A view of a single turtle in the massive list of turtles.'''
def __init__(self, turtles, num):
pass
def getx(self):
pass
def setx(self, x):
pass
def gety(self):
pass
def sety(self, y):
pass
def move(self, dx, dy):
pass
| 7 | 1 | 2 | 0 | 2 | 0 | 1 | 0.06 | 0 | 1 | 0 | 0 | 6 | 1 | 6 | 6 | 23 | 5 | 17 | 11 | 10 | 1 | 17 | 11 | 10 | 1 | 0 | 0 | 6 |
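A minimal timing harness for comparing the three benchmark variants above. It assumes it lives in the same module as the classes (TurtleView2 reads the module-level TURTLES) and uses array('d') so that TurtleView3's memoryview() works:

import timeit
from array import array

N = 1000
TURTLES = array('d', [0.0] * (4 * N))   # x, y, angle, size per turtle

def bench(cls):
    views = [cls(TURTLES, i * 4) for i in range(N)]
    return timeit.timeit(lambda: [v.move(1.0, 0.5) for v in views],
                         number=100)

for cls in (TurtleView, TurtleView2, TurtleView3):
    print(cls.__name__, bench(cls))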
4,199 |
AllTheWayDown/turgles
|
AllTheWayDown_turgles/turgles/buffer.py
|
turgles.buffer.BufferManager
|
class BufferManager(object):
def __init__(self, size):
self.size = size
self.buffers = {}
self.id_to_shape = {}
def get_buffer(self, shape):
if shape in self.buffers:
return self.buffers[shape]
buffer = ShapeBuffer(shape, self.size)
self.buffers[shape] = buffer
return buffer
def create_turtle(self, id, shape, model_init, color_init):
"""Create a slice of memory for turtle data storage"""
assert id not in self.id_to_shape
data = self._create_turtle(id, shape, model_init, color_init)
self.id_to_shape[id] = shape
return data
def _create_turtle(self, id, shape, model_init, color_init):
buffer = self.get_buffer(shape)
data = buffer.new(id, model_init, color_init)
return data
def set_shape(self, id, new_shape):
"""Copies the turtle data from the old shape buffer to the new"""
old_shape = self.id_to_shape[id]
old_buffer = self.get_buffer(old_shape)
model, color = old_buffer.get(id)
new_data = self._create_turtle(id, new_shape, model, color)
old_buffer.remove(id)
self.id_to_shape[id] = new_shape
return new_data
def destroy_turtle(self, id):
shape = self.id_to_shape[id]
buffer = self.get_buffer(shape)
buffer.remove(id)
del self.id_to_shape[id]
|
class BufferManager(object):
def __init__(self, size):
pass
def get_buffer(self, shape):
pass
def create_turtle(self, id, shape, model_init, color_init):
'''Create a slice of memory for turtle data storage'''
pass
def _create_turtle(self, id, shape, model_init, color_init):
pass
def set_shape(self, id, new_shape):
'''Copies the turtle data from the old shape buffer to the new'''
pass
def destroy_turtle(self, id):
pass
| 7 | 2 | 6 | 0 | 5 | 0 | 1 | 0.06 | 1 | 1 | 1 | 0 | 6 | 3 | 6 | 6 | 42 | 7 | 33 | 20 | 26 | 2 | 33 | 20 | 26 | 2 | 1 | 1 | 7 |
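A lifecycle sketch for the manager above. The real ShapeBuffer lives elsewhere in turgles; the dict-backed stub here is a hypothetical stand-in, and the sketch assumes it shares a module with BufferManager so the name resolves to the stub:

class ShapeBuffer(object):
    """Hypothetical stand-in for turgles' real ShapeBuffer."""
    def __init__(self, shape, size):
        self.shape, self.data = shape, {}
    def new(self, id, model_init, color_init):
        self.data[id] = (list(model_init), list(color_init))
        return self.data[id]
    def get(self, id):
        return self.data[id]
    def remove(self, id):
        del self.data[id]

manager = BufferManager(size=16)
manager.create_turtle(1, 'classic', [0.0] * 4, [1.0] * 3)
manager.set_shape(1, 'turtle')    # data is copied between shape buffers
manager.destroy_turtle(1)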