status
stringclasses
1 value
repo_name
stringclasses
31 values
repo_url
stringclasses
31 values
issue_id
int64
1
104k
title
stringlengths
4
369
body
stringlengths
0
254k
issue_url
stringlengths
37
56
pull_url
stringlengths
37
54
before_fix_sha
stringlengths
40
40
after_fix_sha
stringlengths
40
40
report_datetime
timestamp[us, tz=UTC]
language
stringclasses
5 values
commit_datetime
timestamp[us, tz=UTC]
updated_file
stringlengths
4
188
file_content
stringlengths
0
5.12M
closed
ansible/ansible
https://github.com/ansible/ansible
59,140
oVirt: when having network with spaces in name, the search fails to find it.
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> When having network with spaces in name, the search fails to find it. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ovirt_network ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ovirt_network: name: Storage - VLAN vm_network: False vlan_tag: 33 data_center: Default description: Storage with VLAN Tagging comment: Storage with VLAN Tagging clusters: - name: Default required: False migration: True ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ovirtsdk4.Error: Fault reason is "Operation Failed". Fault detail is "[Cannot add Network. The name of the logical network 'Storage - VLAN 33' is already used by an existing logical network in the same data-center. ``` The problem is that in oVirt API searching object with spaces in name, it won't find it. 
We need to quote the search string.
https://github.com/ansible/ansible/issues/59140
https://github.com/ansible/ansible/pull/59184
d4147b55dd6b7087f8b118b901f6860583868336
b46e661d39041998ea31829e8570ec5aa1ce5f0a
2019-07-16T14:02:35Z
python
2019-09-03T19:22:37Z
lib/ansible/module_utils/ovirt.py
# -*- coding: utf-8 -*- # # Copyright (c) 2016 Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import inspect import os import time from abc import ABCMeta, abstractmethod from datetime import datetime from distutils.version import LooseVersion from ansible.module_utils.cloud import CloudRetry from ansible.module_utils.common._collections_compat import Mapping try: from enum import Enum # enum is a ovirtsdk4 requirement import ovirtsdk4 as sdk import ovirtsdk4.version as sdk_version import ovirtsdk4.types as otypes HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0') except ImportError: HAS_SDK = False BYTES_MAP = { 'kib': 2**10, 'mib': 2**20, 'gib': 2**30, 'tib': 2**40, 'pib': 2**50, } def check_sdk(module): if not HAS_SDK: module.fail_json( msg='ovirtsdk4 version 4.3.0 or higher is required for this module' ) def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None): """ Convert SDK Struct type into dictionary. 
""" res = {} def resolve_href(value): # Fetch nested values of struct: try: value = connection.follow_link(value) except sdk.Error: value = None nested_obj = dict( (attr, convert_value(getattr(value, attr))) for attr in attributes if getattr(value, attr, None) ) nested_obj['id'] = getattr(value, 'id', None) nested_obj['href'] = getattr(value, 'href', None) return nested_obj def remove_underscore(val): if val.startswith('_'): val = val[1:] remove_underscore(val) return val def convert_value(value): nested = False if isinstance(value, sdk.Struct): if not fetch_nested or not value.href: return get_dict_of_struct(value) return resolve_href(value) elif isinstance(value, Enum) or isinstance(value, datetime): return str(value) elif isinstance(value, list) or isinstance(value, sdk.List): if isinstance(value, sdk.List) and fetch_nested and value.href: try: value = connection.follow_link(value) nested = True except sdk.Error: value = [] ret = [] for i in value: if isinstance(i, sdk.Struct): if fetch_nested and i.href: ret.append(resolve_href(i)) elif not nested: ret.append(get_dict_of_struct(i)) else: nested_obj = dict( (attr, convert_value(getattr(i, attr))) for attr in attributes if getattr(i, attr, None) ) nested_obj['id'] = getattr(i, 'id', None) ret.append(nested_obj) elif isinstance(i, Enum): ret.append(str(i)) else: ret.append(i) return ret else: return value if struct is not None: for key, value in struct.__dict__.items(): if value is None: continue key = remove_underscore(key) res[key] = convert_value(value) return res def engine_version(connection): """ Return string representation of oVirt engine version. """ engine_api = connection.system_service().get() engine_version = engine_api.product_info.version return '%s.%s' % (engine_version.major, engine_version.minor) def create_connection(auth): """ Create a connection to Python SDK, from task `auth` parameter. 
If user doesnt't have SSO token the `auth` dictionary has following parameters mandatory: url, username, password If user has SSO token the `auth` dictionary has following parameters mandatory: url, token The `ca_file` parameter is mandatory in case user want to use secure connection, in case user want to use insecure connection, it's mandatory to send insecure=True. :param auth: dictionary which contains needed values for connection creation :return: Python SDK connection """ url = auth.get('url') if url is None and auth.get('hostname') is not None: url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname')) return sdk.Connection( url=url, username=auth.get('username'), password=auth.get('password'), ca_file=auth.get('ca_file', None), insecure=auth.get('insecure', False), token=auth.get('token', None), kerberos=auth.get('kerberos', None), headers=auth.get('headers', None), ) def convert_to_bytes(param): """ This method convert units to bytes, which follow IEC standard. :param param: value to be converted """ if param is None: return None # Get rid of whitespaces: param = ''.join(param.split()) # Convert to bytes: if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']: return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1) elif param.isdigit(): return int(param) * 2**10 else: raise ValueError( "Unsupported value(IEC supported): '{value}'".format(value=param) ) def follow_link(connection, link): """ This method returns the entity of the element which link points to. :param connection: connection to the Python SDK :param link: link of the entity :return: entity which link points to """ if link: return connection.follow_link(link) else: return None def get_link_name(connection, link): """ This method returns the name of the element which link points to. 
:param connection: connection to the Python SDK :param link: link of the entity :return: name of the entity, which link points to """ if link: return connection.follow_link(link).name else: return None def equal(param1, param2, ignore_case=False): """ Compare two parameters and return if they are equal. This parameter doesn't run equal operation if first parameter is None. With this approach we don't run equal operation in case user don't specify parameter in their task. :param param1: user inputted parameter :param param2: value of entity parameter :return: True if parameters are equal or first parameter is None, otherwise False """ if param1 is not None: if ignore_case: return param1.lower() == param2.lower() return param1 == param2 return True def search_by_attributes(service, list_params=None, **kwargs): """ Search for the entity by attributes. Nested entities don't support search via REST, so in case using search for nested entity we return all entities and filter them by specified attributes. """ list_params = list_params or {} # Check if 'list' method support search(look for search parameter): if 'search' in inspect.getargspec(service.list)[0]: res = service.list( search=' and '.join('{0}={1}'.format(k, v) for k, v in kwargs.items()), **list_params ) else: res = [ e for e in service.list(**list_params) if len([ k for k, v in kwargs.items() if getattr(e, k, None) == v ]) == len(kwargs) ] res = res or [None] return res[0] def search_by_name(service, name, **kwargs): """ Search for the entity by its name. Nested entities don't support search via REST, so in case using search for nested entity we return all entities and filter them by name. 
:param service: service of the entity :param name: name of the entity :return: Entity object returned by Python SDK """ # Check if 'list' method support search(look for search parameter): if 'search' in inspect.getargspec(service.list)[0]: res = service.list( search="name={name}".format(name=name) ) else: res = [e for e in service.list() if e.name == name] if kwargs: res = [ e for e in service.list() if len([ k for k, v in kwargs.items() if getattr(e, k, None) == v ]) == len(kwargs) ] res = res or [None] return res[0] def get_entity(service, get_params=None): """ Ignore SDK Error in case of getting an entity from service. """ entity = None try: if get_params is not None: entity = service.get(**get_params) else: entity = service.get() except sdk.Error: # We can get here 404, we should ignore it, in case # of removing entity for example. pass return entity def get_id_by_name(service, name, raise_error=True, ignore_case=False): """ Search an entity ID by it's name. """ entity = search_by_name(service, name) if entity is not None: return entity.id if raise_error: raise Exception("Entity '%s' was not found." % name) def wait( service, condition, fail_condition=lambda e: False, timeout=180, wait=True, poll_interval=3, ): """ Wait until entity fulfill expected condition. 
:param service: service of the entity :param condition: condition to be fulfilled :param fail_condition: if this condition is true, raise Exception :param timeout: max time to wait in seconds :param wait: if True wait for condition, if False don't wait :param poll_interval: Number of seconds we should wait until next condition check """ # Wait until the desired state of the entity: if wait: start = time.time() while time.time() < start + timeout: # Exit if the condition of entity is valid: entity = get_entity(service) if condition(entity): return elif fail_condition(entity): raise Exception("Error while waiting on result state of the entity.") # Sleep for `poll_interval` seconds if none of the conditions apply: time.sleep(float(poll_interval)) raise Exception("Timeout exceed while waiting on result state of the entity.") def __get_auth_dict(): OVIRT_URL = os.environ.get('OVIRT_URL') OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME') OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME') OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD') OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN') OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE') OVIRT_INSECURE = OVIRT_CAFILE is None env_vars = None if OVIRT_URL is None and OVIRT_HOSTNAME is not None: OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME) if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN): env_vars = { 'url': OVIRT_URL, 'username': OVIRT_USERNAME, 'password': OVIRT_PASSWORD, 'insecure': OVIRT_INSECURE, 'token': OVIRT_TOKEN, 'ca_file': OVIRT_CAFILE, } if env_vars is not None: auth = dict(default=env_vars, type='dict') else: auth = dict(required=True, type='dict') return auth def ovirt_info_full_argument_spec(**kwargs): """ Extend parameters of info module with parameters which are common to all oVirt info modules. 
:param kwargs: kwargs to be extended :return: extended dictionary with common parameters """ spec = dict( auth=__get_auth_dict(), fetch_nested=dict(default=False, type='bool'), nested_attributes=dict(type='list', default=list()), ) spec.update(kwargs) return spec # Left for third-party module compatibility def ovirt_facts_full_argument_spec(**kwargs): """ This is deprecated. Please use ovirt_info_full_argument_spec instead! :param kwargs: kwargs to be extended :return: extended dictionary with common parameters """ return ovirt_info_full_argument_spec(**kwargs) def ovirt_full_argument_spec(**kwargs): """ Extend parameters of module with parameters which are common to all oVirt modules. :param kwargs: kwargs to be extended :return: extended dictionary with common parameters """ spec = dict( auth=__get_auth_dict(), timeout=dict(default=180, type='int'), wait=dict(default=True, type='bool'), poll_interval=dict(default=3, type='int'), fetch_nested=dict(default=False, type='bool'), nested_attributes=dict(type='list', default=list()), ) spec.update(kwargs) return spec def check_params(module): """ Most modules must have either `name` or `id` specified. """ if module.params.get('name') is None and module.params.get('id') is None: module.fail_json(msg='"name" or "id" is required') def engine_supported(connection, version): return LooseVersion(engine_version(connection)) >= LooseVersion(version) def check_support(version, connection, module, params): """ Check if parameters used by user are supported by oVirt Python SDK and oVirt engine. """ api_version = LooseVersion(engine_version(connection)) version = LooseVersion(version) for param in params: if module.params.get(param) is not None: return LooseVersion(sdk_version.VERSION) >= version and api_version >= version return True class BaseModule(object): """ This is base class for oVirt modules. oVirt modules should inherit this class and override method to customize specific needs of the module. 
The only abstract method of this class is `build_entity`, which must to be implemented in child class. """ __metaclass__ = ABCMeta def __init__(self, connection, module, service, changed=False): self._connection = connection self._module = module self._service = service self._changed = changed self._diff = {'after': dict(), 'before': dict()} @property def changed(self): return self._changed @changed.setter def changed(self, changed): if not self._changed: self._changed = changed @abstractmethod def build_entity(self): """ This method should return oVirt Python SDK type, which we want to create or update, initialized by values passed by Ansible module. For example if we want to create VM, we will return following: types.Vm(name=self._module.params['vm_name']) :return: Specific instance of sdk.Struct. """ pass def param(self, name, default=None): """ Return a module parameter specified by it's name. """ return self._module.params.get(name, default) def update_check(self, entity): """ This method handle checks whether the entity values are same as values passed to ansible module. By default we don't compare any values. :param entity: Entity we want to compare with Ansible module values. :return: True if values are same, so we don't need to update the entity. """ return True def pre_create(self, entity): """ This method is called right before entity is created. :param entity: Entity to be created or updated. """ pass def post_create(self, entity): """ This method is called right after entity is created. :param entity: Entity which was created. """ pass def post_update(self, entity): """ This method is called right after entity is updated. :param entity: Entity which was updated. 
""" pass def diff_update(self, after, update): for k, v in update.items(): if isinstance(v, Mapping): after[k] = self.diff_update(after.get(k, dict()), v) else: after[k] = update[k] return after def create( self, entity=None, result_state=None, fail_condition=lambda e: False, search_params=None, update_params=None, _wait=None, force_create=False, **kwargs ): """ Method which is called when state of the entity is 'present'. If user don't provide `entity` parameter the entity is searched using `search_params` parameter. If entity is found it's updated, whether the entity should be updated is checked by `update_check` method. The corresponding updated entity is build by `build_entity` method. Function executed after entity is created can optionally be specified in `post_create` parameter. Function executed after entity is updated can optionally be specified in `post_update` parameter. :param entity: Entity we want to update, if exists. :param result_state: State which should entity has in order to finish task. :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised. :param search_params: Dictionary of parameters to be used for search. :param update_params: The params which should be passed to update method. :param kwargs: Additional parameters passed when creating entity. :return: Dictionary with values returned by Ansible module. 
""" if entity is None and not force_create: entity = self.search_entity(search_params) self.pre_create(entity) if entity: # Entity exists, so update it: entity_service = self._service.service(entity.id) if not self.update_check(entity): new_entity = self.build_entity() if not self._module.check_mode: update_params = update_params or {} updated_entity = entity_service.update( new_entity, **update_params ) self.post_update(entity) # Update diffs only if user specified --diff parameter, # so we don't useless overload API: if self._module._diff: before = get_dict_of_struct( entity, self._connection, fetch_nested=True, attributes=['name'], ) after = before.copy() self.diff_update(after, get_dict_of_struct(new_entity)) self._diff['before'] = before self._diff['after'] = after self.changed = True else: # Entity don't exists, so create it: if not self._module.check_mode: entity = self._service.add( self.build_entity(), **kwargs ) self.post_create(entity) self.changed = True if not self._module.check_mode: # Wait for the entity to be created and to be in the defined state: entity_service = self._service.service(entity.id) def state_condition(entity): return entity if result_state: def state_condition(entity): return entity and entity.status == result_state wait( service=entity_service, condition=state_condition, fail_condition=fail_condition, wait=_wait if _wait is not None else self._module.params['wait'], timeout=self._module.params['timeout'], poll_interval=self._module.params['poll_interval'], ) return { 'changed': self.changed, 'id': getattr(entity, 'id', None), type(entity).__name__.lower(): get_dict_of_struct( struct=entity, connection=self._connection, fetch_nested=self._module.params.get('fetch_nested'), attributes=self._module.params.get('nested_attributes'), ), 'diff': self._diff, } def pre_remove(self, entity): """ This method is called right before entity is removed. :param entity: Entity which we want to remove. 
""" pass def entity_name(self, entity): return "{e_type} '{e_name}'".format( e_type=type(entity).__name__.lower(), e_name=getattr(entity, 'name', None), ) def remove(self, entity=None, search_params=None, **kwargs): """ Method which is called when state of the entity is 'absent'. If user don't provide `entity` parameter the entity is searched using `search_params` parameter. If entity is found it's removed. Function executed before remove is executed can optionally be specified in `pre_remove` parameter. :param entity: Entity we want to remove. :param search_params: Dictionary of parameters to be used for search. :param kwargs: Additional parameters passed when removing entity. :return: Dictionary with values returned by Ansible module. """ if entity is None: entity = self.search_entity(search_params) if entity is None: return { 'changed': self.changed, 'msg': "Entity wasn't found." } self.pre_remove(entity) entity_service = self._service.service(entity.id) if not self._module.check_mode: entity_service.remove(**kwargs) wait( service=entity_service, condition=lambda entity: not entity, wait=self._module.params['wait'], timeout=self._module.params['timeout'], poll_interval=self._module.params['poll_interval'], ) self.changed = True return { 'changed': self.changed, 'id': entity.id, type(entity).__name__.lower(): get_dict_of_struct( struct=entity, connection=self._connection, fetch_nested=self._module.params.get('fetch_nested'), attributes=self._module.params.get('nested_attributes'), ), } def action( self, action, entity=None, action_condition=lambda e: e, wait_condition=lambda e: e, fail_condition=lambda e: False, pre_action=lambda e: e, post_action=lambda e: None, search_params=None, **kwargs ): """ This method is executed when we want to change the state of some oVirt entity. The action to be executed on oVirt service is specified by `action` parameter. Whether the action should be executed can be specified by passing `action_condition` parameter. 
State which the entity should be in after execution of the action can be specified by `wait_condition` parameter. Function executed before an action on entity can optionally be specified in `pre_action` parameter. Function executed after an action on entity can optionally be specified in `post_action` parameter. :param action: Action which should be executed by service on entity. :param entity: Entity we want to run action on. :param action_condition: Function which is executed when checking if action should be executed. :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised. :param wait_condition: Function which is executed when waiting on result state. :param pre_action: Function which is executed before running the action. :param post_action: Function which is executed after running the action. :param search_params: Dictionary of parameters to be used for search. :param kwargs: Additional parameters passed to action. :return: Dictionary with values returned by Ansible module. 
""" if entity is None: entity = self.search_entity(search_params) entity = pre_action(entity) if entity is None: self._module.fail_json( msg="Entity not found, can't run action '{0}'.".format( action ) ) entity_service = self._service.service(entity.id) entity = entity_service.get() if action_condition(entity): if not self._module.check_mode: getattr(entity_service, action)(**kwargs) self.changed = True post_action(entity) wait( service=self._service.service(entity.id), condition=wait_condition, fail_condition=fail_condition, wait=self._module.params['wait'], timeout=self._module.params['timeout'], poll_interval=self._module.params['poll_interval'], ) return { 'changed': self.changed, 'id': entity.id, type(entity).__name__.lower(): get_dict_of_struct( struct=entity, connection=self._connection, fetch_nested=self._module.params.get('fetch_nested'), attributes=self._module.params.get('nested_attributes'), ), 'diff': self._diff, } def wait_for_import(self, condition=lambda e: True): if self._module.params['wait']: start = time.time() timeout = self._module.params['timeout'] poll_interval = self._module.params['poll_interval'] while time.time() < start + timeout: entity = self.search_entity() if entity and condition(entity): return entity time.sleep(poll_interval) def search_entity(self, search_params=None, list_params=None): """ Always first try to search by `ID`, if ID isn't specified, check if user constructed special search in `search_params`, if not search by `name`. 
""" entity = None if 'id' in self._module.params and self._module.params['id'] is not None: entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params) elif search_params is not None: entity = search_by_attributes(self._service, list_params=list_params, **search_params) elif self._module.params.get('name') is not None: entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name']) return entity def _get_major(self, full_version): if full_version is None or full_version == "": return None if isinstance(full_version, otypes.Version): return int(full_version.major) return int(full_version.split('.')[0]) def _get_minor(self, full_version): if full_version is None or full_version == "": return None if isinstance(full_version, otypes.Version): return int(full_version.minor) return int(full_version.split('.')[1]) def _sdk4_error_maybe(): """ Allow for ovirtsdk4 not being installed. """ if HAS_SDK: return sdk.Error return type(None) class OvirtRetry(CloudRetry): base_class = _sdk4_error_maybe() @staticmethod def status_code_from_exception(error): return error.code @staticmethod def found(response_code, catch_extra_error_codes=None): # This is a list of error codes to retry. retry_on = [ # HTTP status: Conflict 409, ] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) return response_code in retry_on
closed
ansible/ansible
https://github.com/ansible/ansible
58,456
Bitbucket_access_key list object is not an iterator (python 2.7 compat)
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Invoking the 'bitbucket_access_key' of ansible 2.8 installed with pip 19.1 on python 2.7 (Ubuntu 14.04...) gives the following error. ``` line 114, in <module>\n _ansiballz_main()\n File \"/home/ubuntu/.ansible/tmp/ansible-tmp-1561635678.17-267746543539506/AnsiballZ_bitbucket_access_key.py\", line 106, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/home/ubuntu/.ansible/tmp/ansible-tmp-1561635678.17-267746543539506/AnsiballZ_bitbucket_access_key.py\", line 49, in invoke_module\n imp.load_module('__main__', mod, module, MOD_DESC)\n File \"/tmp/ansible_bitbucket_access_key_payload_yyexZQ/__main__.py\", line 285, in <module>\n File \"/tmp/ansible_bitbucket_access_key_payload_yyexZQ/__main__.py\", line 256, in main\n File \"/tmp/ansible_bitbucket_access_key_payload_yyexZQ/__main__.py\", line 169, in get_existing_deploy_key\nTypeError: list object is not an iterator\n", "module_stdout": "", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1} ``` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bitbucket_access_key module ##### ANSIBLE VERSION ``` /usr/local/lib/python2.7/dist-packages/cryptography/hazmat/primitives/constant_time.py:26: CryptographyDeprecationWarning: Support for your Python version is deprecated. The next version of cryptography will remove support. Please upgrade to a release (2.7.7+) that supports hmac.compare_digest as soon as possible. 
utils.PersistentlyDeprecated2018, ansible 2.8.1 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/ubuntu/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /home/ubuntu/.local/lib/python2.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 2.7.6 (default, Nov 13 2018, 12:45:42) [GCC 4.8.4] ``` ##### Patch The following patch fixes the issue: ``` --- bitbucket_access_key.py.orig 2019-06-27 12:17:18.168641126 +0000 +++ bitbucket_access_key.py 2019-06-27 12:16:59.664723635 +0000 @@ -166,7 +166,7 @@ if info['status'] != 200: module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) - res = next(filter(lambda v: v['label'] == module.params['label'], content['values']), None) + res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) if res is not None: return res ```
https://github.com/ansible/ansible/issues/58456
https://github.com/ansible/ansible/pull/58816
5f9fe6b9c8336a24a81632ff4f7039dcf559f30d
9797857a673c4eee253de54cd7e33c06b51e3dba
2019-06-27T12:26:05Z
python
2019-09-03T19:24:41Z
lib/ansible/modules/source_control/bitbucket/bitbucket_access_key.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2019, Evgeniy Krysanov <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community', } DOCUMENTATION = r''' --- module: bitbucket_access_key short_description: Manages Bitbucket repository access keys description: - Manages Bitbucket repository access keys (also called deploy keys). version_added: "2.8" author: - Evgeniy Krysanov (@catcombo) options: client_id: description: - The OAuth consumer key. - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. type: str client_secret: description: - The OAuth consumer secret. - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. type: str repository: description: - The repository name. type: str required: true username: description: - The repository owner. type: str required: true key: description: - The SSH public key. type: str label: description: - The key label. type: str required: true state: description: - Indicates desired state of the access key. type: str required: true choices: [ absent, present ] notes: - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. - Bitbucket OAuth consumer should have permissions to read and administrate account repositories. - Check mode is supported. 
''' EXAMPLES = r''' - name: Create access key bitbucket_access_key: repository: 'bitbucket-repo' username: bitbucket_username key: '{{lookup("file", "bitbucket.pub") }}' label: 'Bitbucket' state: present - name: Delete access key bitbucket_access_key: repository: bitbucket-repo username: bitbucket_username label: Bitbucket state: absent ''' RETURN = r''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.source_control.bitbucket import BitbucketHelper error_messages = { 'required_key': '`key` is required when the `state` is `present`', 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', 'invalid_username_or_repo': 'Invalid `repository` or `username`', 'invalid_key': 'Invalid SSH key or key is already in use', } BITBUCKET_API_ENDPOINTS = { 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, } def get_existing_deploy_key(module, bitbucket): """ Search for an existing deploy key on Bitbucket with the label specified in module param `label` :param module: instance of the :class:`AnsibleModule` :param bitbucket: instance of the :class:`BitbucketHelper` :return: existing deploy key or None if not found :rtype: dict or None Return example:: { "id": 123, "label": "mykey", "created_on": "2019-03-23T10:15:21.517377+00:00", "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", "type": "deploy_key", "comment": "", "last_used": None, "repository": { "links": { "self": { "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" }, "html": { "href": "https://bitbucket.org/mleu/test" }, "avatar": { "href": "..." 
} }, "type": "repository", "name": "test", "full_name": "mleu/test", "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" }, "links": { "self": { "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" } }, } """ content = { 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( username=module.params['username'], repo_slug=module.params['repository'], ) } # Look through the all response pages in search of deploy key we need while 'next' in content: info, content = bitbucket.request( api_url=content['next'], method='GET', ) if info['status'] == 404: module.fail_json(msg=error_messages['invalid_username_or_repo']) if info['status'] == 403: module.fail_json(msg=error_messages['required_permission']) if info['status'] != 200: module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) res = next(filter(lambda v: v['label'] == module.params['label'], content['values']), None) if res is not None: return res return None def create_deploy_key(module, bitbucket): info, content = bitbucket.request( api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( username=module.params['username'], repo_slug=module.params['repository'], ), method='POST', data={ 'key': module.params['key'], 'label': module.params['label'], }, ) if info['status'] == 404: module.fail_json(msg=error_messages['invalid_username_or_repo']) if info['status'] == 403: module.fail_json(msg=error_messages['required_permission']) if info['status'] == 400: module.fail_json(msg=error_messages['invalid_key']) if info['status'] != 200: module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( label=module.params['label'], info=info, )) def delete_deploy_key(module, bitbucket, key_id): info, content = bitbucket.request( api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( username=module.params['username'], repo_slug=module.params['repository'], key_id=key_id, ), method='DELETE', ) if info['status'] == 404: 
module.fail_json(msg=error_messages['invalid_username_or_repo']) if info['status'] == 403: module.fail_json(msg=error_messages['required_permission']) if info['status'] != 204: module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( label=module.params['label'], info=info, )) def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( repository=dict(type='str', required=True), username=dict(type='str', required=True), key=dict(type='str'), label=dict(type='str', required=True), state=dict(type='str', choices=['present', 'absent'], required=True), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) bitbucket = BitbucketHelper(module) key = module.params['key'] state = module.params['state'] # Check parameters if (key is None) and (state == 'present'): module.fail_json(msg=error_messages['required_key']) # Retrieve access token for authorized API requests bitbucket.fetch_access_token() # Retrieve existing deploy key (if any) existing_deploy_key = get_existing_deploy_key(module, bitbucket) changed = False # Create new deploy key in case it doesn't exists if not existing_deploy_key and (state == 'present'): if not module.check_mode: create_deploy_key(module, bitbucket) changed = True # Update deploy key if the old value does not match the new one elif existing_deploy_key and (state == 'present'): if not key.startswith(existing_deploy_key.get('key')): if not module.check_mode: # Bitbucket doesn't support update key for the same label, # so we need to delete the old one first delete_deploy_key(module, bitbucket, existing_deploy_key['id']) create_deploy_key(module, bitbucket) changed = True # Delete deploy key elif existing_deploy_key and (state == 'absent'): if not module.check_mode: delete_deploy_key(module, bitbucket, existing_deploy_key['id']) changed = True module.exit_json(changed=changed) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
59,797
redfish_facts: GetLogs can raise KeyError
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> The GetLogs command can raise a KeyError if any of the properties it tries to read are not present. The properties being gathered are not required, so it should not be an error condition for them to be absent. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> redfish_facts.py redfish_utils.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0.dev0 config file = $HOME/.ansible.cfg configured module search path = ['$HOME/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = $HOME/Development/git/ansible/lib/ansible executable location = $HOME/Development/git/ansible/bin/ansible python version = 3.6.5 (default, Apr 25 2018, 14:26:36) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below [no output] ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Applies to Redfish OOB controllers. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Run the GetLogs commands against a Redfish service that does not include one or more of these properties in the `LogServices` resources: - `Description` - `Name` - `Created` - `Message` - `Severity` <!--- Paste example playbooks or commands between quotes below --> ```yaml --- - hosts: myhosts connection: local name: Get Manager Logs gather_facts: False vars: datatype: Logs tasks: - name: Define output file include_tasks: create_output_file.yml - name: Get Manager Logs redfish_facts: category: Manager command: GetLogs baseuri: "{{ baseuri }}" username: "{{ username }}" password: "{{ password }}" register: result - name: Copy results to output file copy: content: "{{ result | to_nice_json }}" dest: "{{ template }}.json" ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Playbook runs without error ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below redfish_utils.py, line 316, in get_logs KeyError: 'Created' ```
https://github.com/ansible/ansible/issues/59797
https://github.com/ansible/ansible/pull/59877
86b38a0ead535efa9a8d5340bc9c5de30fc8500f
088d821f7514dd9276fb1feb48ce4592736ddf68
2019-07-30T16:20:29Z
python
2019-09-04T01:44:07Z
changelogs/fragments/59877-fix-keyerror-in-redfish-getlogs.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
59,797
redfish_facts: GetLogs can raise KeyError
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> The GetLogs command can raise a KeyError if any of the properties it tries to read are not present. The properties being gathered are not required, so it should not be an error condition for them to be absent. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> redfish_facts.py redfish_utils.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0.dev0 config file = $HOME/.ansible.cfg configured module search path = ['$HOME/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = $HOME/Development/git/ansible/lib/ansible executable location = $HOME/Development/git/ansible/bin/ansible python version = 3.6.5 (default, Apr 25 2018, 14:26:36) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below [no output] ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Applies to Redfish OOB controllers. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Run the GetLogs commands against a Redfish service that does not include one or more of these properties in the `LogServices` resources: - `Description` - `Name` - `Created` - `Message` - `Severity` <!--- Paste example playbooks or commands between quotes below --> ```yaml --- - hosts: myhosts connection: local name: Get Manager Logs gather_facts: False vars: datatype: Logs tasks: - name: Define output file include_tasks: create_output_file.yml - name: Get Manager Logs redfish_facts: category: Manager command: GetLogs baseuri: "{{ baseuri }}" username: "{{ username }}" password: "{{ password }}" register: result - name: Copy results to output file copy: content: "{{ result | to_nice_json }}" dest: "{{ template }}.json" ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Playbook runs without error ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below redfish_utils.py, line 316, in get_logs KeyError: 'Created' ```
https://github.com/ansible/ansible/issues/59797
https://github.com/ansible/ansible/pull/59877
86b38a0ead535efa9a8d5340bc9c5de30fc8500f
088d821f7514dd9276fb1feb48ce4592736ddf68
2019-07-30T16:20:29Z
python
2019-09-04T01:44:07Z
lib/ansible/module_utils/redfish_utils.py
# Copyright (c) 2017-2018 Dell EMC Inc. # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type import json from ansible.module_utils.urls import open_url from ansible.module_utils._text import to_text from ansible.module_utils.six.moves import http_client from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'} PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'} DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module): self.root_uri = root_uri self.creds = creds self.timeout = timeout self.module = module self.service_root = '/redfish/v1/' self._init_session() # The following functions are to send GET/POST/PATCH/DELETE requests def get_request(self, uri): try: resp = open_url(uri, method="GET", headers=GET_HEADERS, url_username=self.creds['user'], url_password=self.creds['pswd'], force_basic_auth=True, validate_certs=False, follow_redirects='all', use_proxy=False, timeout=self.timeout) data = json.loads(resp.read()) headers = dict((k.lower(), v) for (k, v) in resp.info().items()) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" % (e.code, uri, msg)} except URLError as e: return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" % (uri, e.reason)} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'data': data, 'headers': headers} def post_request(self, uri, 
pyld): try: resp = open_url(uri, data=json.dumps(pyld), headers=POST_HEADERS, method="POST", url_username=self.creds['user'], url_password=self.creds['pswd'], force_basic_auth=True, validate_certs=False, follow_redirects='all', use_proxy=False, timeout=self.timeout) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" % (e.code, uri, msg)} except URLError as e: return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" % (uri, e.reason)} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} def patch_request(self, uri, pyld): headers = PATCH_HEADERS r = self.get_request(uri) if r['ret']: # Get etag from etag header or @odata.etag property etag = r['headers'].get('etag') if not etag: etag = r['data'].get('@odata.etag') if etag: # Make copy of headers and add If-Match header headers = dict(headers) headers['If-Match'] = etag try: resp = open_url(uri, data=json.dumps(pyld), headers=headers, method="PATCH", url_username=self.creds['user'], url_password=self.creds['pswd'], force_basic_auth=True, validate_certs=False, follow_redirects='all', use_proxy=False, timeout=self.timeout) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" % (e.code, uri, msg)} except URLError as e: return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'" % (uri, e.reason)} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} def delete_request(self, uri, pyld): try: resp = open_url(uri, data=json.dumps(pyld), headers=DELETE_HEADERS, method="DELETE", 
url_username=self.creds['user'], url_password=self.creds['pswd'], force_basic_auth=True, validate_certs=False, follow_redirects='all', use_proxy=False, timeout=self.timeout) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" % (e.code, uri, msg)} except URLError as e: return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" % (uri, e.reason)} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} @staticmethod def _get_extended_message(error): """ Get Redfish ExtendedInfo message from response payload if present :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error """ msg = http_client.responses.get(error.code, '') if error.code >= 400: try: body = error.read().decode('utf-8') data = json.loads(body) ext_info = data['error']['@Message.ExtendedInfo'] msg = ext_info[0]['Message'] except Exception: pass return msg def _init_session(self): pass def _find_accountservice_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'AccountService' not in data: return {'ret': False, 'msg': "AccountService resource not found"} else: account_service = data["AccountService"]["@odata.id"] response = self.get_request(self.root_uri + account_service) if response['ret'] is False: return response data = response['data'] accounts = data['Accounts']['@odata.id'] if accounts[-1:] == '/': accounts = accounts[:-1] self.accounts_uri = accounts return {'ret': True} def _find_sessionservice_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'SessionService' 
not in data: return {'ret': False, 'msg': "SessionService resource not found"} else: session_service = data["SessionService"]["@odata.id"] response = self.get_request(self.root_uri + session_service) if response['ret'] is False: return response data = response['data'] sessions = data['Sessions']['@odata.id'] if sessions[-1:] == '/': sessions = sessions[:-1] self.sessions_uri = sessions return {'ret': True} def _find_systems_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'Systems' not in data: return {'ret': False, 'msg': "Systems resource not found"} response = self.get_request(self.root_uri + data['Systems']['@odata.id']) if response['ret'] is False: return response self.systems_uris = [ i['@odata.id'] for i in response['data'].get('Members', [])] if not self.systems_uris: return { 'ret': False, 'msg': "ComputerSystem's Members array is either empty or missing"} return {'ret': True} def _find_updateservice_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'UpdateService' not in data: return {'ret': False, 'msg': "UpdateService resource not found"} else: update = data["UpdateService"]["@odata.id"] self.update_uri = update response = self.get_request(self.root_uri + update) if response['ret'] is False: return response data = response['data'] firmware_inventory = data['FirmwareInventory'][u'@odata.id'] self.firmware_uri = firmware_inventory return {'ret': True} def _find_chassis_resource(self): chassis_service = [] response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'Chassis' not in data: return {'ret': False, 'msg': "Chassis resource not found"} else: chassis = data["Chassis"]["@odata.id"] response = self.get_request(self.root_uri + chassis) if response['ret'] is False: return 
response data = response['data'] for member in data[u'Members']: chassis_service.append(member[u'@odata.id']) self.chassis_uri_list = chassis_service return {'ret': True} def _find_managers_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'Managers' not in data: return {'ret': False, 'msg': "Manager resource not found"} else: manager = data["Managers"]["@odata.id"] response = self.get_request(self.root_uri + manager) if response['ret'] is False: return response data = response['data'] for member in data[u'Members']: manager_service = member[u'@odata.id'] self.manager_uri = manager_service return {'ret': True} def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] # Find LogService response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] if 'LogServices' not in data: return {'ret': False, 'msg': "LogServices resource not found"} # Find all entries in LogServices logs_uri = data["LogServices"]["@odata.id"] response = self.get_request(self.root_uri + logs_uri) if response['ret'] is False: return response data = response['data'] for log_svcs_entry in data[u'Members']: response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id']) if response['ret'] is False: return response _data = response['data'] log_svcs_uri_list.append(_data['Entries'][u'@odata.id']) # For each entry in LogServices, get log name and all log entries for log_svcs_uri in log_svcs_uri_list: logs = {} list_of_log_entries = [] response = self.get_request(self.root_uri + log_svcs_uri) if response['ret'] is False: return response data = response['data'] logs['Description'] = data['Description'] # Get all log entries for each type of log found for logEntry in data[u'Members']: # I only extract some fields - Are these entry names standard? 
list_of_log_entries.append(dict( Name=logEntry[u'Name'], Created=logEntry[u'Created'], Message=logEntry[u'Message'], Severity=logEntry[u'Severity'])) log_name = log_svcs_uri.split('/')[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) # list_of_logs[logs{list_of_log_entries[entry{}]}] return {'ret': True, 'entries': list_of_logs} def clear_logs(self): # Find LogService response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] if 'LogServices' not in data: return {'ret': False, 'msg': "LogServices resource not found"} # Find all entries in LogServices logs_uri = data["LogServices"]["@odata.id"] response = self.get_request(self.root_uri + logs_uri) if response['ret'] is False: return response data = response['data'] for log_svcs_entry in data[u'Members']: response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) if response['ret'] is False: return response _data = response['data'] # Check to make sure option is available, otherwise error is ugly if "Actions" in _data: if "#LogService.ClearLog" in _data[u"Actions"]: self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {}) if response['ret'] is False: return response return {'ret': True} def aggregate(self, func): ret = True entries = [] for systems_uri in self.systems_uris: inventory = func(systems_uri) ret = inventory.pop('ret') and ret if 'entries' in inventory: entries.append(({'systems_uri': systems_uri}, inventory['entries'])) return dict(ret=ret, entries=entries) def get_storage_controller_inventory(self, systems_uri): result = {} controller_list = [] controller_results = [] # Get these entries, but does not fail if not found properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', 'Location', 'Manufacturer', 'Model', 'Name', 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] key = "StorageControllers" # Find Storage service response = self.get_request(self.root_uri 
+ systems_uri) if response['ret'] is False: return response data = response['data'] if 'Storage' not in data: return {'ret': False, 'msg': "Storage resource not found"} # Get a list of all storage controllers and build respective URIs storage_uri = data['Storage']["@odata.id"] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] # Loop through Members and their StorageControllers # and gather properties from each StorageController if data[u'Members']: for storage_member in data[u'Members']: storage_member_uri = storage_member[u'@odata.id'] response = self.get_request(self.root_uri + storage_member_uri) data = response['data'] if key in data: controller_list = data[key] for controller in controller_list: controller_result = {} for property in properties: if property in controller: controller_result[property] = controller[property] controller_results.append(controller_result) result['entries'] = controller_results return result else: return {'ret': False, 'msg': "Storage resource not found"} def get_multi_storage_controller_inventory(self): return self.aggregate(self.get_storage_controller_inventory) def get_disk_inventory(self, systems_uri): result = {'entries': []} controller_list = [] disk_results = [] # Get these entries, but does not fail if not found properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', 'EncryptionAbility', 'EncryptionStatus', 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', 'Manufacturer', 'MediaType', 'Model', 'Name', 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', 'RotationSpeedRPM', 'SerialNumber', 'Status'] # Find Storage service response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response data = response['data'] if 'SimpleStorage' not in data and 'Storage' not in data: return {'ret': False, 'msg': "SimpleStorage and Storage resource \ not found"} if 'Storage' in data: # Get 
a list of all storage controllers and build respective URIs storage_uri = data[u'Storage'][u'@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if data[u'Members']: for controller in data[u'Members']: controller_list.append(controller[u'@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] if 'Drives' in data: for device in data[u'Drives']: disk_uri = self.root_uri + device[u'@odata.id'] response = self.get_request(disk_uri) data = response['data'] disk_result = {} for property in properties: if property in data: if data[property] is not None: disk_result[property] = data[property] disk_results.append(disk_result) result["entries"].append(disk_results) if 'SimpleStorage' in data: # Get a list of all storage controllers and build respective URIs storage_uri = data["SimpleStorage"]["@odata.id"] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for controller in data[u'Members']: controller_list.append(controller[u'@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] for device in data[u'Devices']: disk_result = {} for property in properties: if property in device: disk_result[property] = device[property] disk_results.append(disk_result) result["entries"].append(disk_results) return result def get_multi_disk_inventory(self): return self.aggregate(self.get_disk_inventory) def get_volume_inventory(self, systems_uri): result = {'entries': []} controller_list = [] volume_list = [] volume_results = [] # Get these entries, but does not fail if not found properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes', 'Capacity', 'CapacityBytes', 
'CapacitySources', 'Encrypted', 'EncryptionTypes', 'Identifiers', 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities', 'AllocatedPools', 'Status'] # Find Storage service response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response data = response['data'] if 'SimpleStorage' not in data and 'Storage' not in data: return {'ret': False, 'msg': "SimpleStorage and Storage resource \ not found"} if 'Storage' in data: # Get a list of all storage controllers and build respective URIs storage_uri = data[u'Storage'][u'@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if data.get('Members'): for controller in data[u'Members']: controller_list.append(controller[u'@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] if 'Volumes' in data: # Get a list of all volumes and build respective URIs volumes_uri = data[u'Volumes'][u'@odata.id'] response = self.get_request(self.root_uri + volumes_uri) data = response['data'] if data.get('Members'): for volume in data[u'Members']: volume_list.append(volume[u'@odata.id']) for v in volume_list: uri = self.root_uri + v response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] volume_result = {} for property in properties: if property in data: if data[property] is not None: volume_result[property] = data[property] # Get related Drives Id drive_id_list = [] if 'Links' in data: if 'Drives' in data[u'Links']: for link in data[u'Links'][u'Drives']: drive_id_link = link[u'@odata.id'] drive_id = drive_id_link.split("/")[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list volume_results.append(volume_result) result["entries"].append(volume_results) else: return {'ret': False, 'msg': "Storage resource not 
found"} return result def get_multi_volume_inventory(self): return self.aggregate(self.get_volume_inventory) def restart_manager_gracefully(self): result = {} key = "Actions" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] action_uri = data[key]["#Manager.Reset"]["target"] payload = {'ResetType': 'GracefulRestart'} response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response return {'ret': True} def manage_indicator_led(self, command): result = {} key = 'IndicatorLED' payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} result = {} for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} if command in payloads.keys(): payload = {'IndicatorLED': payloads[command]} response = self.patch_request(self.root_uri + chassis_uri, payload) if response['ret'] is False: return response else: return {'ret': False, 'msg': 'Invalid command'} return result def manage_system_power(self, command): key = "Actions" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response data = response['data'] power_state = data["PowerState"] if power_state == "On" and command == 'PowerOn': return {'ret': True, 'changed': False} if power_state == "Off" and command in ['PowerGracefulShutdown', 'PowerForceOff']: return {'ret': True, 'changed': False} reset_action = data[key]["#ComputerSystem.Reset"] action_uri = reset_action["target"] allowable_vals = reset_action.get("[email protected]", []) restart_cmd = "GracefulRestart" if "ForceRestart" in 
allowable_vals and "GracefulRestart" not in allowable_vals: restart_cmd = "ForceRestart" # Define payload accordingly if command == "PowerOn": payload = {'ResetType': 'On'} elif command == "PowerForceOff": payload = {'ResetType': 'ForceOff'} elif command == "PowerForceRestart": payload = {'ResetType': "ForceRestart"} elif command == "PowerGracefulRestart": payload = {'ResetType': 'GracefulRestart'} elif command == "PowerGracefulShutdown": payload = {'ResetType': 'GracefulShutdown'} elif command == "PowerReboot": if power_state == "On": payload = {'ResetType': restart_cmd} else: payload = {'ResetType': "On"} else: return {'ret': False, 'msg': 'Invalid Command'} response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True} def list_users(self): result = {} # listing all users has always been slower than other operations, why? user_list = [] users_results = [] # Get these entries, but does not fail if not found properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for users in data[u'Members']: user_list.append(users[u'@odata.id']) # user_list[] are URIs # for each user, get details for uri in user_list: user = {} response = self.get_request(self.root_uri + uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: user[property] = data[property] users_results.append(user) result["entries"] = users_results return result def add_user(self, user): uri = self.root_uri + self.accounts_uri + "/" + user['userid'] username = {'UserName': user['username']} pswd = {'Password': user['userpswd']} roleid = {'RoleId': user['userrole']} enabled = {'Enabled': True} for payload in username, pswd, roleid, enabled: response = self.patch_request(uri, payload) if 
response['ret'] is False: return response return {'ret': True} def enable_user(self, user): uri = self.root_uri + self.accounts_uri + "/" + user['userid'] payload = {'Enabled': True} response = self.patch_request(uri, payload) if response['ret'] is False: return response return {'ret': True} def delete_user(self, user): uri = self.root_uri + self.accounts_uri + "/" + user['userid'] payload = {'UserName': ""} response = self.patch_request(uri, payload) if response['ret'] is False: return response return {'ret': True} def disable_user(self, user): uri = self.root_uri + self.accounts_uri + "/" + user['userid'] payload = {'Enabled': False} response = self.patch_request(uri, payload) if response['ret'] is False: return response return {'ret': True} def update_user_role(self, user): uri = self.root_uri + self.accounts_uri + "/" + user['userid'] payload = {'RoleId': user['userrole']} response = self.patch_request(uri, payload) if response['ret'] is False: return response return {'ret': True} def update_user_password(self, user): uri = self.root_uri + self.accounts_uri + "/" + user['userid'] payload = {'Password': user['userpswd']} response = self.patch_request(uri, payload) if response['ret'] is False: return response return {'ret': True} def get_sessions(self): result = {} # listing all users has always been slower than other operations, why? 
session_list = [] sessions_results = [] # Get these entries, but does not fail if not found properties = ['Description', 'Id', 'Name', 'UserName'] response = self.get_request(self.root_uri + self.sessions_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for sessions in data[u'Members']: session_list.append(sessions[u'@odata.id']) # session_list[] are URIs # for each session, get details for uri in session_list: session = {} response = self.get_request(self.root_uri + uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: session[property] = data[property] sessions_results.append(session) result["entries"] = sessions_results return result def get_firmware_update_capabilities(self): result = {} response = self.get_request(self.root_uri + self.update_uri) if response['ret'] is False: return response result['ret'] = True result['entries'] = {} data = response['data'] if "Actions" in data: actions = data['Actions'] if len(actions) > 0: for key in actions.keys(): action = actions.get(key) if 'title' in action: title = action['title'] else: title = key result['entries'][title] = action.get('[email protected]', ["Key [email protected] not found"]) else: return {'ret': "False", 'msg': "Actions list is empty."} else: return {'ret': "False", 'msg': "Key Actions not found."} return result def get_firmware_inventory(self): result = {} response = self.get_request(self.root_uri + self.firmware_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] result['entries'] = [] for device in data[u'Members']: uri = self.root_uri + device[u'@odata.id'] # Get details for each device response = self.get_request(uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] firmware = {} # Get these standard properties if present for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', 'SoftwareId', 
'LowestSupportedVersion', 'Manufacturer', 'ReleaseDate']: if key in data: firmware[key] = data.get(key) result['entries'].append(firmware) return result def get_bios_attributes(self, systems_uri): result = {} bios_attributes = {} key = "Bios" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} bios_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for attribute in data[u'Attributes'].items(): bios_attributes[attribute[0]] = attribute[1] result["entries"] = bios_attributes return result def get_multi_bios_attributes(self): return self.aggregate(self.get_bios_attributes) def get_boot_order(self, systems_uri): result = {} # Get these entries from BootOption, if present properties = ['DisplayName', 'BootOptionReference'] # Retrieve System resource response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] # Confirm needed Boot properties are present if 'Boot' not in data or 'BootOrder' not in data['Boot']: return {'ret': False, 'msg': "Key BootOrder not found"} boot = data['Boot'] boot_order = boot['BootOrder'] # Retrieve BootOptions if present if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']: boot_options_uri = boot['BootOptions']["@odata.id"] # Get BootOptions resource response = self.get_request(self.root_uri + boot_options_uri) if response['ret'] is False: return response data = response['data'] # Retrieve Members array if 'Members' not in data: return {'ret': False, 'msg': "Members not found in BootOptionsCollection"} members = data['Members'] else: members = [] # Build dict of BootOptions keyed by BootOptionReference 
boot_options_dict = {} for member in members: if '@odata.id' not in member: return {'ret': False, 'msg': "@odata.id not found in BootOptions"} boot_option_uri = member['@odata.id'] response = self.get_request(self.root_uri + boot_option_uri) if response['ret'] is False: return response data = response['data'] if 'BootOptionReference' not in data: return {'ret': False, 'msg': "BootOptionReference not found in BootOption"} boot_option_ref = data['BootOptionReference'] # fetch the props to display for this boot device boot_props = {} for prop in properties: if prop in data: boot_props[prop] = data[prop] boot_options_dict[boot_option_ref] = boot_props # Build boot device list boot_device_list = [] for ref in boot_order: boot_device_list.append( boot_options_dict.get(ref, {'BootOptionReference': ref})) result["entries"] = boot_device_list return result def get_multi_boot_order(self): return self.aggregate(self.get_boot_order) def get_boot_override(self, systems_uri): result = {} properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget", "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "[email protected]"] response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if 'Boot' not in data: return {'ret': False, 'msg': "Key Boot not found"} boot = data['Boot'] boot_overrides = {} if "BootSourceOverrideEnabled" in boot: if boot["BootSourceOverrideEnabled"] is not False: for property in properties: if property in boot: if boot[property] is not None: boot_overrides[property] = boot[property] else: return {'ret': False, 'msg': "No boot override is enabled."} result['entries'] = boot_overrides return result def get_multi_boot_override(self): return self.aggregate(self.get_boot_override) def set_bios_default_settings(self): result = {} key = "Bios" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if 
response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} bios_uri = data[key]["@odata.id"] # Extract proper URI response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"] response = self.post_request(self.root_uri + reset_bios_settings_uri, {}) if response['ret'] is False: return response return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"} def set_one_time_boot_device(self, bootdevice, uefi_target, boot_next): result = {} key = "Boot" if not bootdevice: return {'ret': False, 'msg': "bootdevice option required for SetOneTimeBoot"} # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} boot = data[key] annotation = '[email protected]' if annotation in boot: allowable_values = boot[annotation] if isinstance(allowable_values, list) and bootdevice not in allowable_values: return {'ret': False, 'msg': "Boot device %s not in list of allowable values (%s)" % (bootdevice, allowable_values)} # read existing values enabled = boot.get('BootSourceOverrideEnabled') target = boot.get('BootSourceOverrideTarget') cur_uefi_target = boot.get('UefiTargetBootSourceOverride') cur_boot_next = boot.get('BootNext') if bootdevice == 'UefiTarget': if not uefi_target: return {'ret': False, 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} if enabled == 'Once' and target == bootdevice and uefi_target == cur_uefi_target: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': 'Once', 
'BootSourceOverrideTarget': bootdevice, 'UefiTargetBootSourceOverride': uefi_target } } elif bootdevice == 'UefiBootNext': if not boot_next: return {'ret': False, 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} if enabled == 'Once' and target == bootdevice and boot_next == cur_boot_next: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': 'Once', 'BootSourceOverrideTarget': bootdevice, 'BootNext': boot_next } } else: if enabled == 'Once' and target == bootdevice: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': 'Once', 'BootSourceOverrideTarget': bootdevice } } response = self.patch_request(self.root_uri + self.systems_uris[0], payload) if response['ret'] is False: return response return {'ret': True, 'changed': True} def set_bios_attributes(self, attr): result = {} key = "Bios" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} bios_uri = data[key]["@odata.id"] # Extract proper URI response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] # First, check if BIOS attribute exists if attr['bios_attr_name'] not in data[u'Attributes']: return {'ret': False, 'msg': "BIOS attribute not found"} # Find out if value is already set to what we want. 
If yes, return if data[u'Attributes'][attr['bios_attr_name']] == attr['bios_attr_value']: return {'ret': True, 'changed': False, 'msg': "BIOS attribute already set"} set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] # Example: bios_attr = {\"name\":\"value\"} bios_attr = "{\"" + attr['bios_attr_name'] + "\":\"" + attr['bios_attr_value'] + "\"}" payload = {"Attributes": json.loads(bios_attr)} response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"} def get_chassis_inventory(self): result = {} chassis_results = [] # Get these entries, but does not fail if not found properties = ['ChassisType', 'PartNumber', 'AssetTag', 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model'] # Go through list for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] chassis_result = {} for property in properties: if property in data: chassis_result[property] = data[property] chassis_results.append(chassis_result) result["entries"] = chassis_results return result def get_fan_inventory(self): result = {} fan_results = [] key = "Thermal" # Get these entries, but does not fail if not found properties = ['FanName', 'Reading', 'ReadingUnits', 'Status'] # Go through list for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key in data: # match: found an entry for "Thermal" information = fans thermal_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + thermal_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for device in data[u'Fans']: fan = {} for property in properties: if property in device: 
fan[property] = device[property] fan_results.append(fan) result["entries"] = fan_results return result def get_chassis_power(self): result = {} key = "Power" # Get these entries, but does not fail if not found properties = ['Name', 'PowerAllocatedWatts', 'PowerAvailableWatts', 'PowerCapacityWatts', 'PowerConsumedWatts', 'PowerMetrics', 'PowerRequestedWatts', 'RelatedItem', 'Status'] chassis_power_results = [] # Go through list for chassis_uri in self.chassis_uri_list: chassis_power_result = {} response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key in data: response = self.get_request(self.root_uri + data[key]['@odata.id']) data = response['data'] if 'PowerControl' in data: if len(data['PowerControl']) > 0: data = data['PowerControl'][0] for property in properties: if property in data: chassis_power_result[property] = data[property] else: return {'ret': False, 'msg': 'Key PowerControl not found.'} chassis_power_results.append(chassis_power_result) else: return {'ret': False, 'msg': 'Key Power not found.'} result['entries'] = chassis_power_results return result def get_chassis_thermals(self): result = {} sensors = [] key = "Thermal" # Get these entries, but does not fail if not found properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical', 'UpperThresholdFatal', 'UpperThresholdNonCritical', 'LowerThresholdCritical', 'LowerThresholdFatal', 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', 'SensorNumber'] # Go through list for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key in data: thermal_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + thermal_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] 
if "Temperatures" in data: for sensor in data[u'Temperatures']: sensor_result = {} for property in properties: if property in sensor: if sensor[property] is not None: sensor_result[property] = sensor[property] sensors.append(sensor_result) if sensors is None: return {'ret': False, 'msg': 'Key Temperatures was not found.'} result['entries'] = sensors return result def get_cpu_inventory(self, systems_uri): result = {} cpu_list = [] cpu_results = [] key = "Processors" # Get these entries, but does not fail if not found properties = ['Id', 'Manufacturer', 'Model', 'MaxSpeedMHz', 'TotalCores', 'TotalThreads', 'Status'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} processors_uri = data[key]["@odata.id"] # Get a list of all CPUs and build respective URIs response = self.get_request(self.root_uri + processors_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for cpu in data[u'Members']: cpu_list.append(cpu[u'@odata.id']) for c in cpu_list: cpu = {} uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: cpu[property] = data[property] cpu_results.append(cpu) result["entries"] = cpu_results return result def get_multi_cpu_inventory(self): return self.aggregate(self.get_cpu_inventory) def get_memory_inventory(self, systems_uri): result = {} memory_list = [] memory_results = [] key = "Memory" # Get these entries, but does not fail if not found properties = ['SerialNumber', 'MemoryDeviceType', 'PartNuber', 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name'] # Search for 'key' entry and extract URI from it response = 
self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} memory_uri = data[key]["@odata.id"] # Get a list of all DIMMs and build respective URIs response = self.get_request(self.root_uri + memory_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for dimm in data[u'Members']: memory_list.append(dimm[u'@odata.id']) for m in memory_list: dimm = {} uri = self.root_uri + m response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] if "Status" in data: if "State" in data["Status"]: if data["Status"]["State"] == "Absent": continue else: continue for property in properties: if property in data: dimm[property] = data[property] memory_results.append(dimm) result["entries"] = memory_results return result def get_multi_memory_inventory(self): return self.aggregate(self.get_memory_inventory) def get_nic_inventory(self, resource_uri): result = {} nic_list = [] nic_results = [] key = "EthernetInterfaces" # Get these entries, but does not fail if not found properties = ['Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', 'NameServers', 'MACAddress', 'PermanentMACAddress', 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status'] response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} ethernetinterfaces_uri = data[key]["@odata.id"] # Get a list of all network controllers and build respective URIs response = self.get_request(self.root_uri + ethernetinterfaces_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for nic in data[u'Members']: nic_list.append(nic[u'@odata.id']) for n in nic_list: nic = {} uri = self.root_uri + n response = 
self.get_request(uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: nic[property] = data[property] nic_results.append(nic) result["entries"] = nic_results return result def get_multi_nic_inventory(self, resource_type): ret = True entries = [] # Given resource_type, use the proper URI if resource_type == 'Systems': resource_uris = self.systems_uris elif resource_type == 'Manager': # put in a list to match what we're doing with systems_uris resource_uris = [self.manager_uri] for resource_uri in resource_uris: inventory = self.get_nic_inventory(resource_uri) ret = inventory.pop('ret') and ret if 'entries' in inventory: entries.append(({'resource_uri': resource_uri}, inventory['entries'])) return dict(ret=ret, entries=entries) def get_virtualmedia(self, resource_uri): result = {} virtualmedia_list = [] virtualmedia_results = [] key = "VirtualMedia" # Get these entries, but does not fail if not found properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes', 'Image', 'ImageName', 'Name', 'WriteProtected', 'TransferMethod', 'TransferProtocolType'] response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} virtualmedia_uri = data[key]["@odata.id"] # Get a list of all virtual media and build respective URIs response = self.get_request(self.root_uri + virtualmedia_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for virtualmedia in data[u'Members']: virtualmedia_list.append(virtualmedia[u'@odata.id']) for n in virtualmedia_list: virtualmedia = {} uri = self.root_uri + n response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: virtualmedia[property] = data[property] 
virtualmedia_results.append(virtualmedia) result["entries"] = virtualmedia_results return result def get_multi_virtualmedia(self): ret = True entries = [] # Because _find_managers_resource() only find last Manager uri in self.manager_uri, not one list. This should be 1 issue. # I have to put manager_uri into list to reduce future changes when the issue is fixed. resource_uris = [self.manager_uri] for resource_uri in resource_uris: virtualmedia = self.get_virtualmedia(resource_uri) ret = virtualmedia.pop('ret') and ret if 'entries' in virtualmedia: entries.append(({'resource_uri': resource_uri}, virtualmedia['entries'])) return dict(ret=ret, entries=entries) def get_psu_inventory(self): result = {} psu_list = [] psu_results = [] key = "PowerSupplies" # Get these entries, but does not fail if not found properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer', 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType', 'Status'] # Get a list of all Chassis and build URIs, then get all PowerSupplies # from each Power entry in the Chassis chassis_uri_list = self.chassis_uri_list for chassis_uri in chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if 'Power' in data: power_uri = data[u'Power'][u'@odata.id'] else: continue response = self.get_request(self.root_uri + power_uri) data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} psu_list = data[key] for psu in psu_list: psu_not_present = False psu_data = {} for property in properties: if property in psu: if psu[property] is not None: if property == 'Status': if 'State' in psu[property]: if psu[property]['State'] == 'Absent': psu_not_present = True psu_data[property] = psu[property] if psu_not_present: continue psu_results.append(psu_data) result["entries"] = psu_results if not result["entries"]: return {'ret': False, 'msg': "No PowerSupply 
objects found"} return result def get_multi_psu_inventory(self): return self.aggregate(self.get_psu_inventory) def get_system_inventory(self, systems_uri): result = {} inventory = {} # Get these entries, but does not fail if not found properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', 'ProcessorSummary', 'TrustedModules'] response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for property in properties: if property in data: inventory[property] = data[property] result["entries"] = inventory return result def get_multi_system_inventory(self): return self.aggregate(self.get_system_inventory)
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
lib/ansible/module_utils/network/ios/config/l3_interfaces/l3_interfaces.py
# -*- coding: utf-8 -*- # Copyright 2019 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The ios_l3_interfaces class It is in this file where the current configuration (as dict) is compared to the provided configuration (as dict) and the command set necessary to bring the current configuration to it's desired end-state is created """ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.network.common.cfg.base import ConfigBase from ansible.module_utils.network.common.utils import to_list from ansible.module_utils.network.ios.facts.facts import Facts from ansible.module_utils.network.ios.utils.utils import dict_to_set from ansible.module_utils.network.ios.utils.utils import remove_command_from_config_list, add_command_to_config_list from ansible.module_utils.network.ios.utils.utils import filter_dict_having_none_value, remove_duplicate_interface from ansible.module_utils.network.ios.utils.utils import validate_n_expand_ipv4, validate_ipv6 class L3_Interfaces(ConfigBase): """ The ios_l3_interfaces class """ gather_subset = [ '!all', '!min', ] gather_network_resources = [ 'l3_interfaces' ] def get_l3_interfaces_facts(self): """ Get the 'facts' (the current configuration) :rtype: A dictionary :returns: The current configuration as a dictionary """ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) l3_interfaces_facts = facts['ansible_network_resources'].get('l3_interfaces') if not l3_interfaces_facts: return [] return l3_interfaces_facts def execute_module(self): """ Execute the module :rtype: A dictionary :returns: The result from module execution """ result = {'changed': False} commands = list() warnings = list() existing_l3_interfaces_facts = self.get_l3_interfaces_facts() commands.extend(self.set_config(existing_l3_interfaces_facts)) if commands: if not self._module.check_mode: 
self._connection.edit_config(commands) result['changed'] = True result['commands'] = commands changed_l3_interfaces_facts = self.get_l3_interfaces_facts() result['before'] = existing_l3_interfaces_facts if result['changed']: result['after'] = changed_l3_interfaces_facts result['warnings'] = warnings return result def set_config(self, existing_l3_interfaces_facts): """ Collect the configuration from the args passed to the module, collect the current configuration (as a dict from facts) :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ want = self._module.params['config'] have = existing_l3_interfaces_facts resp = self.set_state(want, have) return to_list(resp) def set_state(self, want, have): """ Select the appropriate function based on the state provided :param want: the desired configuration as a dictionary :param have: the current configuration as a dictionary :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] state = self._module.params['state'] if state == 'overridden': commands = self._state_overridden(want, have, self._module) elif state == 'deleted': commands = self._state_deleted(want, have) elif state == 'merged': commands = self._state_merged(want, have, self._module) elif state == 'replaced': commands = self._state_replaced(want, have, self._module) return commands def _state_replaced(self, want, have, module): """ The command generator when state is replaced :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] for interface in want: for each in have: if each['name'] == interface['name']: break else: if '.' 
in interface['name']: commands.extend(self._set_config(interface, dict(), module)) continue have_dict = filter_dict_having_none_value(interface, each) commands.extend(self._clear_config(dict(), have_dict)) commands.extend(self._set_config(interface, each, module)) # Remove the duplicate interface call commands = remove_duplicate_interface(commands) return commands def _state_overridden(self, want, have, module): """ The command generator when state is overridden :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] for each in have: for interface in want: if each['name'] == interface['name']: break else: # We didn't find a matching desired state, which means we can # pretend we recieved an empty desired state. interface = dict(name=each['name']) kwargs = {'want': interface, 'have': each} commands.extend(self._clear_config(**kwargs)) continue have_dict = filter_dict_having_none_value(interface, each) commands.extend(self._clear_config(dict(), have_dict)) commands.extend(self._set_config(interface, each, module)) # Remove the duplicate interface call commands = remove_duplicate_interface(commands) return commands def _state_merged(self, want, have, module): """ The command generator when state is merged :rtype: A list :returns: the commands necessary to merge the provided into the current configuration """ commands = [] for interface in want: for each in have: if each['name'] == interface['name']: break else: if '.' 
in interface['name']: commands.extend(self._set_config(interface, dict(), module)) continue commands.extend(self._set_config(interface, each, module)) return commands def _state_deleted(self, want, have): """ The command generator when state is deleted :rtype: A list :returns: the commands necessary to remove the current configuration of the provided objects """ commands = [] if want: for interface in want: for each in have: if each['name'] == interface['name']: break elif interface['name'] in each['name']: break else: continue interface = dict(name=interface['name']) commands.extend(self._clear_config(interface, each)) else: for each in have: want = dict() commands.extend(self._clear_config(want, each)) return commands def _set_config(self, want, have, module): # Set the interface config based on the want and have config commands = [] interface = 'interface ' + want['name'] # To handle L3 IPV4 configuration if want.get("ipv4"): for each in want.get("ipv4"): if each.get('address') != 'dhcp': ip_addr_want = validate_n_expand_ipv4(module, each) each['address'] = ip_addr_want # Convert the want and have dict to set want_dict = dict_to_set(want) have_dict = dict_to_set(have) # To handle L3 IPV4 configuration if want.get('ipv4'): # Get the diff b/w want and have IPV4 if have.get('ipv4'): ipv4 = tuple(set(dict(want_dict).get('ipv4')) - set(dict(have_dict).get('ipv4'))) else: diff = want_dict - have_dict ipv4 = dict(diff).get('ipv4') if ipv4: for each in ipv4: ipv4_dict = dict(each) if ipv4_dict.get('address') != 'dhcp': cmd = "ip address {0}".format(ipv4_dict['address']) if ipv4_dict.get("secondary"): cmd += " secondary" elif ipv4_dict.get('address') == 'dhcp': cmd = "ip address dhcp" if ipv4_dict.get('dhcp_client') is not None and ipv4_dict.get('dhcp_hostname'): cmd = "ip address dhcp client-id GigabitEthernet 0/{0} hostname {1}"\ .format(ipv4_dict.get('dhcp_client'), ipv4_dict.get('dhcp_hostname')) elif ipv4_dict.get('dhcp_client') and not 
ipv4_dict.get('dhcp_hostname'): cmd = "ip address dhcp client-id GigabitEthernet 0/{0}"\ .format(ipv4_dict.get('dhcp_client')) elif not ipv4_dict.get('dhcp_client') and ipv4_dict.get('dhcp_hostname'): cmd = "ip address dhcp hostname {0}".format(ipv4_dict.get('dhcp_client')) add_command_to_config_list(interface, cmd, commands) # To handle L3 IPV6 configuration if want.get('ipv6'): # Get the diff b/w want and have IPV6 if have.get('ipv6'): ipv6 = tuple(set(dict(want_dict).get('ipv6')) - set(dict(have_dict).get('ipv6'))) else: diff = want_dict - have_dict ipv6 = dict(diff).get('ipv6') if ipv6: for each in ipv6: ipv6_dict = dict(each) validate_ipv6(ipv6_dict.get('address'), module) cmd = "ipv6 address {0}".format(ipv6_dict.get('address')) add_command_to_config_list(interface, cmd, commands) return commands def _clear_config(self, want, have): # Delete the interface config based on the want and have config count = 0 commands = [] if want.get('name'): interface = 'interface ' + want['name'] else: interface = 'interface ' + have['name'] if have.get('ipv4') and want.get('ipv4'): for each in have.get('ipv4'): if each.get('secondary') and not (want.get('ipv4')[count].get('secondary')): cmd = 'ipv4 address {0} secondary'.format(each.get('address')) remove_command_from_config_list(interface, cmd, commands) count += 1 if have.get('ipv4') and not want.get('ipv4'): remove_command_from_config_list(interface, 'ip address', commands) if have.get('ipv6') and not want.get('ipv6'): remove_command_from_config_list(interface, 'ipv6 address', commands) return commands
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
lib/ansible/module_utils/network/ios/utils/utils.py
# # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # utils from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.six import iteritems from ansible.module_utils.network.common.utils import is_masklen, to_netmask def remove_command_from_config_list(interface, cmd, commands): # To delete the passed config if interface not in commands: commands.insert(0, interface) commands.append('no %s' % cmd) return commands def add_command_to_config_list(interface, cmd, commands): # To set the passed config if interface not in commands: commands.insert(0, interface) commands.append(cmd) def dict_to_set(sample_dict): # Generate a set with passed dictionary for comparison test_dict = {} if isinstance(sample_dict, dict): for k, v in iteritems(sample_dict): if v is not None: if isinstance(v, list): if isinstance(v[0], dict): li = [] for each in v: for key, value in iteritems(each): if isinstance(value, list): each[key] = tuple(value) li.append(tuple(iteritems(each))) v = tuple(li) else: v = tuple(v) elif isinstance(v, dict): li = [] for key, value in iteritems(v): if isinstance(value, list): v[key] = tuple(value) li.extend(tuple(iteritems(v))) v = tuple(li) test_dict.update({k: v}) return_set = set(tuple(iteritems(test_dict))) else: return_set = set(sample_dict) return return_set def filter_dict_having_none_value(want, have): # Generate dict with have dict value which is None in want dict test_dict = dict() test_key_dict = dict() name = want.get('name') if name: test_dict['name'] = name diff_ip = False want_ip = '' for k, v in iteritems(want): if isinstance(v, dict): for key, value in iteritems(v): if value is None: dict_val = have.get(k).get(key) test_key_dict.update({key: dict_val}) test_dict.update({k: test_key_dict}) if isinstance(v, list): for key, value in iteritems(v[0]): if value is None: dict_val = have.get(k).get(key) 
test_key_dict.update({key: dict_val}) test_dict.update({k: test_key_dict}) # below conditions checks are added to check if # secondary IP is configured, if yes then delete # the already configured IP if want and have IP # is different else if it's same no need to delete for each in v: if each.get('secondary'): want_ip = each.get('address').split('/') have_ip = have.get('ipv4') if len(want_ip) > 1 and have_ip and have_ip[0].get('secondary'): have_ip = have_ip[0]['address'].split(' ')[0] if have_ip != want_ip[0]: diff_ip = True if each.get('secondary') and diff_ip is True: test_key_dict.update({'secondary': True}) test_dict.update({'ipv4': test_key_dict}) if v is None: val = have.get(k) test_dict.update({k: val}) return test_dict def remove_duplicate_interface(commands): # Remove duplicate interface from commands set_cmd = [] for each in commands: if 'interface' in each: if each not in set_cmd: set_cmd.append(each) else: set_cmd.append(each) return set_cmd def validate_ipv4(value, module): if value: address = value.split('/') if len(address) != 2: module.fail_json(msg='address format is <ipv4 address>/<mask>, got invalid format {0}'.format(value)) if not is_masklen(address[1]): module.fail_json(msg='invalid value for mask: {0}, mask should be in range 0-32'.format(address[1])) def validate_ipv6(value, module): if value: address = value.split('/') if len(address) != 2: module.fail_json(msg='address format is <ipv6 address>/<mask>, got invalid format {0}'.format(value)) else: if not 0 <= int(address[1]) <= 128: module.fail_json(msg='invalid value for mask: {0}, mask should be in range 0-128'.format(address[1])) def validate_n_expand_ipv4(module, want): # Check if input IPV4 is valid IP and expand IPV4 with its subnet mask ip_addr_want = want.get('address') validate_ipv4(ip_addr_want, module) ip = ip_addr_want.split('/') if len(ip) == 2: ip_addr_want = '{0} {1}'.format(ip[0], to_netmask(ip[1])) return ip_addr_want def normalize_interface(name): """Return the normalized 
interface name """ if not name: return def _get_number(name): digits = '' for char in name: if char.isdigit() or char in '/.': digits += char return digits if name.lower().startswith('gi'): if_type = 'GigabitEthernet' elif name.lower().startswith('te'): if_type = 'TenGigabitEthernet' elif name.lower().startswith('fa'): if_type = 'FastEthernet' elif name.lower().startswith('fo'): if_type = 'FortyGigabitEthernet' elif name.lower().startswith('long'): if_type = 'LongReachEthernet' elif name.lower().startswith('et'): if_type = 'Ethernet' elif name.lower().startswith('vl'): if_type = 'Vlan' elif name.lower().startswith('lo'): if_type = 'loopback' elif name.lower().startswith('po'): if_type = 'Port-channel' elif name.lower().startswith('nv'): if_type = 'nve' elif name.lower().startswith('twe'): if_type = 'TwentyFiveGigE' elif name.lower().startswith('hu'): if_type = 'HundredGigE' else: if_type = None number_list = name.split(' ') if len(number_list) == 2: number = number_list[-1].strip() else: number = _get_number(name) if if_type: proper_interface = if_type + number else: proper_interface = name return proper_interface def get_interface_type(interface): """Gets the type of interface """ if interface.upper().startswith('GI'): return 'GigabitEthernet' elif interface.upper().startswith('TE'): return 'TenGigabitEthernet' elif interface.upper().startswith('FA'): return 'FastEthernet' elif interface.upper().startswith('FO'): return 'FortyGigabitEthernet' elif interface.upper().startswith('LON'): return 'LongReachEthernet' elif interface.upper().startswith('ET'): return 'Ethernet' elif interface.upper().startswith('VL'): return 'Vlan' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('PO'): return 'Port-channel' elif interface.upper().startswith('NV'): return 'nve' elif interface.upper().startswith('TWE'): return 'TwentyFiveGigE' elif interface.upper().startswith('HU'): return 'HundredGigE' else: return 'unknown'
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
test/integration/targets/ios_l3_interfaces/tests/cli/_populate_config.yaml
--- - name: Populate Config cli_config: config: "{{ lines }}" vars: lines: | interface GigabitEthernet 0/1 ip address dhcp client-id GigabitEthernet 0/0 hostname test.com interface GigabitEthernet 0/2 ip address 192.168.2.1 255.255.255.0 secondary ip address 192.168.2.2 255.255.255.0 ipv6 address fd5d:12c9:2201:1::1/64
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
test/integration/targets/ios_l3_interfaces/tests/cli/merged.yaml
--- - debug: msg: "START Merged ios_l3_interfaces state for integration tests on connection={{ ansible_connection }}" - include_tasks: _remove_config.yaml - block: - name: Merge provided configuration with device configuration ios_l3_interfaces: &merged config: - name: GigabitEthernet0/1 ipv4: - address: dhcp dhcp_client: 0 dhcp_hostname: test.com - name: GigabitEthernet0/2 ipv4: - address: 192.168.3.1/24 secondary: True - address: 192.168.3.2/24 ipv6: - address: fd5d:12c9:2201:1::1/64 state: merged register: result - name: Assert that correct set of commands were generated assert: that: - "{{ merged['commands'] | symmetric_difference(result['commands']) | length == 0 }}" - name: Assert that before dicts are correctly generated assert: that: - "{{ merged['before'] | symmetric_difference(result['before']) | length == 0 }}" - name: Assert that after dict is correctly generated assert: that: - "{{ merged['after'] | symmetric_difference(result['after']) | length == 0 }}" - name: Merge provided configuration with device configuration (IDEMPOTENT) ios_l3_interfaces: *merged register: result - name: Assert that the previous task was idempotent assert: that: - "result['changed'] == false" always: - include_tasks: _remove_config.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
test/integration/targets/ios_l3_interfaces/tests/cli/overridden.yaml
--- - debug: msg: "START Overridden ios_l3_interfaces state for integration tests on connection={{ ansible_connection }}" - include_tasks: _remove_config.yaml - include_tasks: _populate_config.yaml - block: - name: Override device configuration of all interfaces with provided configuration ios_l3_interfaces: &overridden config: - name: GigabitEthernet0/0 ipv4: - address: dhcp - name: GigabitEthernet0/2 ipv4: - address: 192.168.4.1/24 - address: 192.168.4.2/24 secondary: True state: overridden register: result - name: Assert that correct set of commands were generated assert: that: - "{{ overridden['commands'] | symmetric_difference(result['commands']) | length == 0 }}" - name: Assert that before dicts are correctly generated assert: that: - "{{ overridden['before'] | symmetric_difference(result['before']) | length == 0 }}" - name: Assert that after dict is correctly generated assert: that: - "{{ overridden['after'] | symmetric_difference(result['after']) | length == 0 }}" - name: Override device configuration of all interfaces with provided configuration (IDEMPOTENT) ios_l3_interfaces: *overridden register: result - name: Assert that task was idempotent assert: that: - "result['changed'] == false" always: - include_tasks: _remove_config.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
test/integration/targets/ios_l3_interfaces/tests/cli/replaced.yaml
--- - debug: msg: "START Replaced ios_l3_interfaces state for integration tests on connection={{ ansible_connection }}" - include_tasks: _remove_config.yaml - include_tasks: _populate_config.yaml - block: - name: Replaces device configuration of listed interfaces with provided configuration ios_l3_interfaces: &replaced config: - name: GigabitEthernet0/1 ipv4: - address: 192.168.3.1/24 - name: GigabitEthernet0/2 ipv4: - address: 192.168.4.1/24 secondary: True - address: 192.168.4.2/24 state: replaced register: result - name: Assert that correct set of commands were generated assert: that: - "{{ replaced['commands'] | symmetric_difference(result['commands']) | length == 0 }}" - name: Assert that before dicts are correctly generated assert: that: - "{{ replaced['before'] | symmetric_difference(result['before']) | length == 0 }}" - name: Assert that after dict is correctly generated assert: that: - "{{ replaced['after'] | symmetric_difference(result['after']) | length == 0 }}" - name: Replaces device configuration of listed interfaces with provided configuration (IDEMPOTENT) ios_l3_interfaces: *replaced register: result - name: Assert that task was idempotent assert: that: - "result['changed'] == false" always: - include_tasks: _remove_config.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,305
IOS l3 interface tests failing intermittently coz of zuul env issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOS l3 interface tests failing coz of zuul env issue, replace after assert dict is failing intermittently in diff python version, i.e. sometimes it fails on python27 and other time it fails on python35/37 (ref: https://github.com/ansible/ansible/pull/61103#issuecomment-524646499) ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ios_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> All test should pass w/o issue ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Test fails intermittently <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61305
https://github.com/ansible/ansible/pull/61682
01f4081b663ae260f88283aa66ae6fb03bc40ff4
2672dc9694f3fe80c7a817f81554a6c8561b065b
2019-08-26T08:47:47Z
python
2019-09-04T10:21:05Z
test/integration/targets/ios_l3_interfaces/vars/main.yaml
--- merged: before: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - name: GigabitEthernet0/1 - name: GigabitEthernet0/2 commands: - "interface GigabitEthernet0/1" - "ip address dhcp client-id GigabitEthernet 0/0 hostname test.com" - "interface GigabitEthernet0/2" - "ip address 192.168.3.1 255.255.255.0 secondary" - "ip address 192.168.3.2 255.255.255.0" - "ipv6 address fd5d:12c9:2201:1::1/64" after: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - ipv4: - address: dhcp dhcp_client: 0 dhcp_hostname: test.com name: GigabitEthernet0/1 - ipv4: - address: 192.168.3.1 255.255.255.0 secondary: true - address: 192.168.3.2 255.255.255.0 ipv6: - address: fd5d:12c9:2201:1::1/64 name: GigabitEthernet0/2 replaced: before: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - ipv4: - address: dhcp dhcp_client: 0 dhcp_hostname: test.com name: GigabitEthernet0/1 - ipv4: - address: 192.168.2.1 255.255.255.0 secondary: true - address: 192.168.2.2 255.255.255.0 ipv6: - address: fd5d:12c9:2201:1::1/64 name: GigabitEthernet0/2 commands: - "interface GigabitEthernet0/1" - "ip address 192.168.3.1 255.255.255.0" - "interface GigabitEthernet0/2" - "no ip address" - "no ipv6 address" - "ip address 192.168.4.1 255.255.255.0 secondary" - "ip address 192.168.4.2 255.255.255.0" after: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - ipv4: - address: 192.168.3.1 255.255.255.0 name: GigabitEthernet0/1 - ipv4: - address: 192.168.4.1 255.255.255.0 secondary: true - address: 192.168.4.2 255.255.255.0 name: GigabitEthernet0/2 overridden: before: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - ipv4: - address: dhcp dhcp_client: 0 dhcp_hostname: test.com name: GigabitEthernet0/1 - ipv4: - address: 192.168.2.1 255.255.255.0 secondary: true - address: 192.168.2.2 255.255.255.0 ipv6: - address: 
fd5d:12c9:2201:1::1/64 name: GigabitEthernet0/2 commands: - "interface GigabitEthernet0/1" - "no ip address" - "interface GigabitEthernet0/2" - "no ip address" - "no ipv6 address" - "ip address 192.168.4.1 255.255.255.0" - "ip address 192.168.4.2 255.255.255.0 secondary" after: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - name: GigabitEthernet0/1 - ipv4: - address: 192.168.4.2 255.255.255.0 secondary: true - address: 192.168.4.1 255.255.255.0 name: GigabitEthernet0/2 deleted: before: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - ipv4: - address: dhcp dhcp_client: 0 dhcp_hostname: test.com name: GigabitEthernet0/1 - ipv4: - address: 192.168.2.1 255.255.255.0 secondary: true - address: 192.168.2.2 255.255.255.0 ipv6: - address: fd5d:12c9:2201:1::1/64 name: GigabitEthernet0/2 commands: - "interface GigabitEthernet0/1" - "no ip address" - "interface GigabitEthernet0/2" - "no ip address" - "no ipv6 address" after: - name: loopback888 - name: loopback999 - ipv4: - address: dhcp name: GigabitEthernet0/0 - name: GigabitEthernet0/1 - name: GigabitEthernet0/2
closed
ansible/ansible
https://github.com/ansible/ansible
61,719
eos_bgp / ios_bgp / iosxr_bgp not safe for collection
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY All 3 of these providers, do not work properly when converted to a collection. The following error is raise: could not find a suitable provider for this module https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_20/20/30f1a39eece8bc37eeb89f05d733d133c707626e/check/ansible-test-network-integration-eos-python27/ca018b0/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2b0be560d30c4c24620b0ade29bd62b7fbb57b22/check/ansible-test-network-integration-ios-python37/a516d70/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2019f0b07076ca58ddb9e9ac0be65402b477eec1/check/ansible-test-network-integration-iosxr-python27/38b49a0/controller/ara-report/ This appears to be related to how we look up the module / provider for bgp tasks. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> eos_bgp ios_bgp iosxr_bgp ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible stable-2.9 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul.ansible.com ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> git clone any one of the collections below, then run ansible-test network integration testing. 
<!--- Paste example playbooks or commands between quotes below --> ```yaml https://github.com/ansible-network/ansible_collections.arista.eos https://github.com/ansible-network/ansible_collections.cisco.ios https://github.com/ansible-network/ansible_collections.cisco.iosxr ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below 2019-08-31 03:48:50,568 p=zuul u=5694 | <74.63.204.64> EXEC /bin/sh -c 'rm -f -r /home/zuul/.ansible/tmp/ansible-local-5510omNavF/ansible-tmp-1567223329.55-30831165844767/ > /dev/null 2>&1 && sleep 0' 2019-08-31 03:48:50,581 p=zuul u=5510 | The full traceback is: WARNING: The below traceback may *not* be related to the actual failure. File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 63, in edit_config commands = self.provider.edit_config(current_config) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 36, in provider cls = providers.get(network_os, self._name, connection_type) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/providers.py", line 44, in get raise ValueError("could not find a suitable provider for this module") 2019-08-31 03:48:50,587 p=zuul u=5510 | fatal: [eos-4.20.10]: FAILED! 
=> changed=false invocation: module_args: config: address_family: null bgp_as: 64496 log_neighbor_changes: null neighbors: null networks: null redistribute: null router_id: 192.0.2.2 operation: merge msg: could not find a suitable provider for this module ```
https://github.com/ansible/ansible/issues/61719
https://github.com/ansible/ansible/pull/61761
9ee0deea24e04b5221edec74c74da922a2544a29
a795f6941eb50c9e21ff20c297700c1e264223b5
2019-09-03T16:11:56Z
python
2019-09-04T14:29:00Z
lib/ansible/module_utils/network/eos/providers/module.py
# # (c) 2019, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.eos.providers import providers from ansible.module_utils._text import to_text class NetworkModule(AnsibleModule): fail_on_missing_provider = True def __init__(self, connection=None, *args, **kwargs): super(NetworkModule, self).__init__(*args, **kwargs) if connection is None: connection = Connection(self._socket_path) self.connection = connection @property def provider(self): if not hasattr(self, '_provider'): capabilities = self.from_json(self.connection.get_capabilities()) network_os = capabilities['device_info']['network_os'] network_api = capabilities['network_api'] if network_api == 'cliconf': connection_type = 'network_cli' cls = providers.get(network_os, self._name, connection_type) if not cls: msg = 'unable to find suitable provider for network os %s' % network_os if self.fail_on_missing_provider: self.fail_json(msg=msg) else: self.warn(msg) obj = cls(self.params, self.connection, self.check_mode) setattr(self, '_provider', obj) return getattr(self, '_provider') def get_facts(self, subset=None): try: self.provider.get_facts(subset) except Exception as exc: self.fail_json(msg=to_text(exc)) def edit_config(self, config_filter=None): current_config = self.connection.get_config(flags=config_filter) try: commands = self.provider.edit_config(current_config) changed = bool(commands) return {'commands': commands, 'changed': changed} except Exception as exc: self.fail_json(msg=to_text(exc))
closed
ansible/ansible
https://github.com/ansible/ansible
61,719
eos_bgp / ios_bgp / iosxr_bgp not safe for collection
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY All 3 of these providers, do not work properly when converted to a collection. The following error is raise: could not find a suitable provider for this module https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_20/20/30f1a39eece8bc37eeb89f05d733d133c707626e/check/ansible-test-network-integration-eos-python27/ca018b0/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2b0be560d30c4c24620b0ade29bd62b7fbb57b22/check/ansible-test-network-integration-ios-python37/a516d70/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2019f0b07076ca58ddb9e9ac0be65402b477eec1/check/ansible-test-network-integration-iosxr-python27/38b49a0/controller/ara-report/ This appears to be related to how we look up the module / provider for bgp tasks. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> eos_bgp ios_bgp iosxr_bgp ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible stable-2.9 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul.ansible.com ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> git clone any one of the collections below, then run ansible-test network integration testing. 
<!--- Paste example playbooks or commands between quotes below --> ```yaml https://github.com/ansible-network/ansible_collections.arista.eos https://github.com/ansible-network/ansible_collections.cisco.ios https://github.com/ansible-network/ansible_collections.cisco.iosxr ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below 2019-08-31 03:48:50,568 p=zuul u=5694 | <74.63.204.64> EXEC /bin/sh -c 'rm -f -r /home/zuul/.ansible/tmp/ansible-local-5510omNavF/ansible-tmp-1567223329.55-30831165844767/ > /dev/null 2>&1 && sleep 0' 2019-08-31 03:48:50,581 p=zuul u=5510 | The full traceback is: WARNING: The below traceback may *not* be related to the actual failure. File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 63, in edit_config commands = self.provider.edit_config(current_config) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 36, in provider cls = providers.get(network_os, self._name, connection_type) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/providers.py", line 44, in get raise ValueError("could not find a suitable provider for this module") 2019-08-31 03:48:50,587 p=zuul u=5510 | fatal: [eos-4.20.10]: FAILED! 
=> changed=false invocation: module_args: config: address_family: null bgp_as: 64496 log_neighbor_changes: null neighbors: null networks: null redistribute: null router_id: 192.0.2.2 operation: merge msg: could not find a suitable provider for this module ```
https://github.com/ansible/ansible/issues/61719
https://github.com/ansible/ansible/pull/61761
9ee0deea24e04b5221edec74c74da922a2544a29
a795f6941eb50c9e21ff20c297700c1e264223b5
2019-09-03T16:11:56Z
python
2019-09-04T14:29:00Z
lib/ansible/module_utils/network/frr/providers/module.py
# # (c) 2019, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.frr.providers import providers from ansible.module_utils._text import to_text class NetworkModule(AnsibleModule): fail_on_missing_provider = True def __init__(self, connection=None, *args, **kwargs): super(NetworkModule, self).__init__(*args, **kwargs) if connection is None: connection = Connection(self._socket_path) self.connection = connection @property def provider(self): if not hasattr(self, '_provider'): capabilities = self.from_json(self.connection.get_capabilities()) network_os = capabilities['device_info']['network_os'] network_api = capabilities['network_api'] if network_api == 'cliconf': connection_type = 'network_cli' cls = providers.get(network_os, self._name, connection_type) if not cls: msg = 'unable to find suitable provider for network os %s' % network_os if self.fail_on_missing_provider: self.fail_json(msg=msg) else: self.warn(msg) obj = cls(self.params, self.connection, self.check_mode) setattr(self, '_provider', obj) return getattr(self, '_provider') def get_facts(self, subset=None): try: self.provider.get_facts(subset) except Exception as exc: self.fail_json(msg=to_text(exc)) def edit_config(self, config_filter=None): current_config = self.connection.get_config(flags=config_filter) try: commands = self.provider.edit_config(current_config) changed = bool(commands) return {'commands': commands, 'changed': changed} except Exception as exc: self.fail_json(msg=to_text(exc))
closed
ansible/ansible
https://github.com/ansible/ansible
61,719
eos_bgp / ios_bgp / iosxr_bgp not safe for collection
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY All 3 of these providers, do not work properly when converted to a collection. The following error is raise: could not find a suitable provider for this module https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_20/20/30f1a39eece8bc37eeb89f05d733d133c707626e/check/ansible-test-network-integration-eos-python27/ca018b0/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2b0be560d30c4c24620b0ade29bd62b7fbb57b22/check/ansible-test-network-integration-ios-python37/a516d70/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2019f0b07076ca58ddb9e9ac0be65402b477eec1/check/ansible-test-network-integration-iosxr-python27/38b49a0/controller/ara-report/ This appears to be related to how we look up the module / provider for bgp tasks. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> eos_bgp ios_bgp iosxr_bgp ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible stable-2.9 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul.ansible.com ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> git clone any one of the collections below, then run ansible-test network integration testing. 
<!--- Paste example playbooks or commands between quotes below --> ```yaml https://github.com/ansible-network/ansible_collections.arista.eos https://github.com/ansible-network/ansible_collections.cisco.ios https://github.com/ansible-network/ansible_collections.cisco.iosxr ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below 2019-08-31 03:48:50,568 p=zuul u=5694 | <74.63.204.64> EXEC /bin/sh -c 'rm -f -r /home/zuul/.ansible/tmp/ansible-local-5510omNavF/ansible-tmp-1567223329.55-30831165844767/ > /dev/null 2>&1 && sleep 0' 2019-08-31 03:48:50,581 p=zuul u=5510 | The full traceback is: WARNING: The below traceback may *not* be related to the actual failure. File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 63, in edit_config commands = self.provider.edit_config(current_config) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 36, in provider cls = providers.get(network_os, self._name, connection_type) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/providers.py", line 44, in get raise ValueError("could not find a suitable provider for this module") 2019-08-31 03:48:50,587 p=zuul u=5510 | fatal: [eos-4.20.10]: FAILED! 
=> changed=false invocation: module_args: config: address_family: null bgp_as: 64496 log_neighbor_changes: null neighbors: null networks: null redistribute: null router_id: 192.0.2.2 operation: merge msg: could not find a suitable provider for this module ```
https://github.com/ansible/ansible/issues/61719
https://github.com/ansible/ansible/pull/61761
9ee0deea24e04b5221edec74c74da922a2544a29
a795f6941eb50c9e21ff20c297700c1e264223b5
2019-09-03T16:11:56Z
python
2019-09-04T14:29:00Z
lib/ansible/module_utils/network/ios/providers/module.py
# # (c) 2019, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.ios.providers import providers from ansible.module_utils._text import to_text class NetworkModule(AnsibleModule): fail_on_missing_provider = True def __init__(self, connection=None, *args, **kwargs): super(NetworkModule, self).__init__(*args, **kwargs) if connection is None: connection = Connection(self._socket_path) self.connection = connection @property def provider(self): if not hasattr(self, '_provider'): capabilities = self.from_json(self.connection.get_capabilities()) network_os = capabilities['device_info']['network_os'] network_api = capabilities['network_api'] if network_api == 'cliconf': connection_type = 'network_cli' cls = providers.get(network_os, self._name, connection_type) if not cls: msg = 'unable to find suitable provider for network os %s' % network_os if self.fail_on_missing_provider: self.fail_json(msg=msg) else: self.warn(msg) obj = cls(self.params, self.connection, self.check_mode) setattr(self, '_provider', obj) return getattr(self, '_provider') def get_facts(self, subset=None): try: self.provider.get_facts(subset) except Exception as exc: self.fail_json(msg=to_text(exc)) def edit_config(self, config_filter=None): current_config = self.connection.get_config(flags=config_filter) try: commands = self.provider.edit_config(current_config) changed = bool(commands) return {'commands': commands, 'changed': changed} except Exception as exc: self.fail_json(msg=to_text(exc))
closed
ansible/ansible
https://github.com/ansible/ansible
61,719
eos_bgp / ios_bgp / iosxr_bgp not safe for collection
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY All 3 of these providers, do not work properly when converted to a collection. The following error is raise: could not find a suitable provider for this module https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_20/20/30f1a39eece8bc37eeb89f05d733d133c707626e/check/ansible-test-network-integration-eos-python27/ca018b0/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2b0be560d30c4c24620b0ade29bd62b7fbb57b22/check/ansible-test-network-integration-ios-python37/a516d70/controller/ara-report/ https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_15/15/2019f0b07076ca58ddb9e9ac0be65402b477eec1/check/ansible-test-network-integration-iosxr-python27/38b49a0/controller/ara-report/ This appears to be related to how we look up the module / provider for bgp tasks. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> eos_bgp ios_bgp iosxr_bgp ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible stable-2.9 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> zuul.ansible.com ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> git clone any one of the collections below, then run ansible-test network integration testing. 
<!--- Paste example playbooks or commands between quotes below --> ```yaml https://github.com/ansible-network/ansible_collections.arista.eos https://github.com/ansible-network/ansible_collections.cisco.ios https://github.com/ansible-network/ansible_collections.cisco.iosxr ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below 2019-08-31 03:48:50,568 p=zuul u=5694 | <74.63.204.64> EXEC /bin/sh -c 'rm -f -r /home/zuul/.ansible/tmp/ansible-local-5510omNavF/ansible-tmp-1567223329.55-30831165844767/ > /dev/null 2>&1 && sleep 0' 2019-08-31 03:48:50,581 p=zuul u=5510 | The full traceback is: WARNING: The below traceback may *not* be related to the actual failure. File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 63, in edit_config commands = self.provider.edit_config(current_config) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/module.py", line 36, in provider cls = providers.get(network_os, self._name, connection_type) File "/tmp/ansible_arista.eos.eos_bgp_payload_qprZAR/ansible_arista.eos.eos_bgp_payload.zip/ansible_collections/arista/eos/plugins/module_utils/network/eos/providers/providers.py", line 44, in get raise ValueError("could not find a suitable provider for this module") 2019-08-31 03:48:50,587 p=zuul u=5510 | fatal: [eos-4.20.10]: FAILED! 
=> changed=false invocation: module_args: config: address_family: null bgp_as: 64496 log_neighbor_changes: null neighbors: null networks: null redistribute: null router_id: 192.0.2.2 operation: merge msg: could not find a suitable provider for this module ```
https://github.com/ansible/ansible/issues/61719
https://github.com/ansible/ansible/pull/61761
9ee0deea24e04b5221edec74c74da922a2544a29
a795f6941eb50c9e21ff20c297700c1e264223b5
2019-09-03T16:11:56Z
python
2019-09-04T14:29:00Z
lib/ansible/module_utils/network/iosxr/providers/module.py
# # (c) 2019, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.iosxr.providers import providers from ansible.module_utils._text import to_text class NetworkModule(AnsibleModule): fail_on_missing_provider = True def __init__(self, connection=None, *args, **kwargs): super(NetworkModule, self).__init__(*args, **kwargs) if connection is None: connection = Connection(self._socket_path) self.connection = connection @property def provider(self): if not hasattr(self, '_provider'): capabilities = self.from_json(self.connection.get_capabilities()) network_os = capabilities['device_info']['network_os'] network_api = capabilities['network_api'] if network_api == 'cliconf': connection_type = 'network_cli' cls = providers.get(network_os, self._name, connection_type) if not cls: msg = 'unable to find suitable provider for network os %s' % network_os if self.fail_on_missing_provider: self.fail_json(msg=msg) else: self.warn(msg) obj = cls(self.params, self.connection, self.check_mode) setattr(self, '_provider', obj) return getattr(self, '_provider') def get_facts(self, subset=None): try: self.provider.get_facts(subset) except Exception as exc: self.fail_json(msg=to_text(exc)) def edit_config(self, config_filter=None): current_config = self.connection.get_config(flags=config_filter) try: commands = self.provider.edit_config(current_config) changed = bool(commands) return {'commands': commands, 'changed': changed} except Exception as exc: self.fail_json(msg=to_text(exc))
closed
ansible/ansible
https://github.com/ansible/ansible
60,961
docker_compose treats Docker deprecation notice as a failure
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY The latest Docker release has a bug that causes it to spit out a spurious deprecation warning when you pull an image. https://github.com/moby/moby/issues/39701 Ansible's `docker_compose` module treats that warning as a fatal error even though the operation actually succeeds. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME docker_compose ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.8.4 config file = /Users/koreth/work/terraform-configs/ansible/ansible.cfg configured module search path = ['/Users/koreth/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/2.8.4/libexec/lib/python3.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.7.4 (default, Jul 9 2019, 18:13:23) [Clang 10.0.1 (clang-1001.0.46.4)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ANSIBLE_PIPELINING(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = True ANSIBLE_SSH_CONTROL_PATH(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = %(directory)s/%%C CACHE_PLUGIN(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = jsonfile CACHE_PLUGIN_CONNECTION(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = /tmp/ansible-facts.json CACHE_PLUGIN_TIMEOUT(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = 86400 DEFAULT_FORKS(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = 100 DEFAULT_GATHERING(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = smart DEFAULT_ROLES_PATH(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = 
['/Users/koreth/work/terraform-configs/ansible/thirdparty', '/Users/koreth/work/terraform-configs/ansible/roles', '/Users/koreth/work/terraform-configs/roles'] DEFAULT_VAULT_PASSWORD_FILE(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = /Users/koreth/work/terraform-configs/ansible/.vault-password DEPRECATION_WARNINGS(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = False DISPLAY_SKIPPED_HOSTS(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = False RETRY_FILES_ENABLED(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = False ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Target OS is Ubuntu 18.04.1 Docker version 19.03.1, build 74b1e89 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - hosts: vagrant-test tasks: - docker_compose: project_name: test pull: yes definition: registrator: image: gliderlabs/registrator:v7 ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Docker image is pulled, service is launched, and task succeeds. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Docker image is pulled, service is launched, and task fails because of the unexpected warning message from Docker. <!--- Paste verbatim command output between quotes --> ```paste below TASK [docker_compose] ******************************************************************************* fatal: [vagrant-test]: FAILED! => {"changed": false, "module_stderr": "", "module_stdout": "v7: Pulling from gliderlabs/registrator\n[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. 
Please contact admins of the docker.io registry NOW to avoid future disruption.\nDigest: sha256:0caaf84db8d645993964a0225c0485e7853135bf368393ce5a3a2c854379d476\nStatus: Image is up to date for gliderlabs/registrator:v7\n\n{\"services\": {\"registrator\": {\"test_registrator_1\": {\"cmd\": [], \"state\": {\"status\": \"running\", \"running\": true}, \"labels\": {\"com.docker.compose.service\": \"registrator\", \"com.docker.compose.config-hash\": \"5d06bcd558d0ce87526005a2d05a4c2b652d3952bdaeef9681d8552426863824\", \"com.docker.compose.project\": \"test\", \"com.docker.compose.version\": \"1.24.1\", \"com.docker.compose.oneoff\": \"False\", \"com.docker.compose.container-number\": \"1\"}, \"networks\": {\"bridge\": {\"macAddress\": \"02:42:ac:11:00:02\", \"globalIPv6PrefixLen\": 0, \"links\": null, \"IPPrefixLen\": 16, \"globalIPv6\": \"\", \"IPAddress\": \"172.17.0.2\", \"aliases\": []}}, \"image\": \"gliderlabs/registrator:v7\"}}}, \"invocation\": {\"module_args\": {\"project_src\": null, \"hostname_check\": false, \"recreate\": \"smart\", \"docker_host\": \"unix://var/run/docker.sock\", \"remove_images\": null, \"client_key\": null, \"scale\": null, \"nocache\": false, \"remove_volumes\": false, \"state\": \"present\", \"build\": false, \"client_cert\": null, \"api_version\": \"auto\", \"files\": null, \"project_name\": \"test\", \"ca_cert\": null, \"pull\": true, \"tls_hostname\": \"localhost\", \"dependencies\": true, \"services\": null, \"tls\": false, \"definition\": {\"registrator\": {\"image\": \"gliderlabs/registrator:v7\"}}, \"remove_orphans\": false, \"restarted\": false, \"ssl_version\": null, \"stopped\": false, \"timeout\": 10, \"debug\": false, \"validate_certs\": false}}, \"changed\": true, \"ansible_facts\": {\"registrator\": {\"test_registrator_1\": {\"cmd\": [], \"state\": {\"status\": \"running\", \"running\": true}, \"labels\": {\"com.docker.compose.service\": \"registrator\", \"com.docker.compose.config-hash\": 
\"5d06bcd558d0ce87526005a2d05a4c2b652d3952bdaeef9681d8552426863824\", \"com.docker.compose.project\": \"test\", \"com.docker.compose.version\": \"1.24.1\", \"com.docker.compose.oneoff\": \"False\", \"com.docker.compose.container-number\": \"1\"}, \"networks\": {\"bridge\": {\"macAddress\": \"02:42:ac:11:00:02\", \"globalIPv6PrefixLen\": 0, \"links\": null, \"IPPrefixLen\": 16, \"globalIPv6\": \"\", \"IPAddress\": \"172.17.0.2\", \"aliases\": []}}, \"image\": \"gliderlabs/registrator:v7\"}}}}\n", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 0} ```
https://github.com/ansible/ansible/issues/60961
https://github.com/ansible/ansible/pull/61650
d5c8d325e43cdea45a2deccd47023ee86b072b43
0c73e47a42f69901ea892f9d0e58acb554f4e668
2019-08-20T16:37:00Z
python
2019-09-04T17:34:05Z
changelogs/fragments/60961-docker_compose-fix-deprecation-warning.yml
closed
ansible/ansible
https://github.com/ansible/ansible
60,961
docker_compose treats Docker deprecation notice as a failure
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY The latest Docker release has a bug that causes it to spit out a spurious deprecation warning when you pull an image. https://github.com/moby/moby/issues/39701 Ansible's `docker_compose` module treats that warning as a fatal error even though the operation actually succeeds. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME docker_compose ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.8.4 config file = /Users/koreth/work/terraform-configs/ansible/ansible.cfg configured module search path = ['/Users/koreth/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/2.8.4/libexec/lib/python3.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.7.4 (default, Jul 9 2019, 18:13:23) [Clang 10.0.1 (clang-1001.0.46.4)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ANSIBLE_PIPELINING(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = True ANSIBLE_SSH_CONTROL_PATH(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = %(directory)s/%%C CACHE_PLUGIN(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = jsonfile CACHE_PLUGIN_CONNECTION(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = /tmp/ansible-facts.json CACHE_PLUGIN_TIMEOUT(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = 86400 DEFAULT_FORKS(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = 100 DEFAULT_GATHERING(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = smart DEFAULT_ROLES_PATH(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = 
['/Users/koreth/work/terraform-configs/ansible/thirdparty', '/Users/koreth/work/terraform-configs/ansible/roles', '/Users/koreth/work/terraform-configs/roles'] DEFAULT_VAULT_PASSWORD_FILE(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = /Users/koreth/work/terraform-configs/ansible/.vault-password DEPRECATION_WARNINGS(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = False DISPLAY_SKIPPED_HOSTS(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = False RETRY_FILES_ENABLED(/Users/koreth/work/terraform-configs/ansible/ansible.cfg) = False ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Target OS is Ubuntu 18.04.1 Docker version 19.03.1, build 74b1e89 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - hosts: vagrant-test tasks: - docker_compose: project_name: test pull: yes definition: registrator: image: gliderlabs/registrator:v7 ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Docker image is pulled, service is launched, and task succeeds. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Docker image is pulled, service is launched, and task fails because of the unexpected warning message from Docker. <!--- Paste verbatim command output between quotes --> ```paste below TASK [docker_compose] ******************************************************************************* fatal: [vagrant-test]: FAILED! => {"changed": false, "module_stderr": "", "module_stdout": "v7: Pulling from gliderlabs/registrator\n[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. 
Please contact admins of the docker.io registry NOW to avoid future disruption.\nDigest: sha256:0caaf84db8d645993964a0225c0485e7853135bf368393ce5a3a2c854379d476\nStatus: Image is up to date for gliderlabs/registrator:v7\n\n{\"services\": {\"registrator\": {\"test_registrator_1\": {\"cmd\": [], \"state\": {\"status\": \"running\", \"running\": true}, \"labels\": {\"com.docker.compose.service\": \"registrator\", \"com.docker.compose.config-hash\": \"5d06bcd558d0ce87526005a2d05a4c2b652d3952bdaeef9681d8552426863824\", \"com.docker.compose.project\": \"test\", \"com.docker.compose.version\": \"1.24.1\", \"com.docker.compose.oneoff\": \"False\", \"com.docker.compose.container-number\": \"1\"}, \"networks\": {\"bridge\": {\"macAddress\": \"02:42:ac:11:00:02\", \"globalIPv6PrefixLen\": 0, \"links\": null, \"IPPrefixLen\": 16, \"globalIPv6\": \"\", \"IPAddress\": \"172.17.0.2\", \"aliases\": []}}, \"image\": \"gliderlabs/registrator:v7\"}}}, \"invocation\": {\"module_args\": {\"project_src\": null, \"hostname_check\": false, \"recreate\": \"smart\", \"docker_host\": \"unix://var/run/docker.sock\", \"remove_images\": null, \"client_key\": null, \"scale\": null, \"nocache\": false, \"remove_volumes\": false, \"state\": \"present\", \"build\": false, \"client_cert\": null, \"api_version\": \"auto\", \"files\": null, \"project_name\": \"test\", \"ca_cert\": null, \"pull\": true, \"tls_hostname\": \"localhost\", \"dependencies\": true, \"services\": null, \"tls\": false, \"definition\": {\"registrator\": {\"image\": \"gliderlabs/registrator:v7\"}}, \"remove_orphans\": false, \"restarted\": false, \"ssl_version\": null, \"stopped\": false, \"timeout\": 10, \"debug\": false, \"validate_certs\": false}}, \"changed\": true, \"ansible_facts\": {\"registrator\": {\"test_registrator_1\": {\"cmd\": [], \"state\": {\"status\": \"running\", \"running\": true}, \"labels\": {\"com.docker.compose.service\": \"registrator\", \"com.docker.compose.config-hash\": 
\"5d06bcd558d0ce87526005a2d05a4c2b652d3952bdaeef9681d8552426863824\", \"com.docker.compose.project\": \"test\", \"com.docker.compose.version\": \"1.24.1\", \"com.docker.compose.oneoff\": \"False\", \"com.docker.compose.container-number\": \"1\"}, \"networks\": {\"bridge\": {\"macAddress\": \"02:42:ac:11:00:02\", \"globalIPv6PrefixLen\": 0, \"links\": null, \"IPPrefixLen\": 16, \"globalIPv6\": \"\", \"IPAddress\": \"172.17.0.2\", \"aliases\": []}}, \"image\": \"gliderlabs/registrator:v7\"}}}}\n", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 0} ```
https://github.com/ansible/ansible/issues/60961
https://github.com/ansible/ansible/pull/61650
d5c8d325e43cdea45a2deccd47023ee86b072b43
0c73e47a42f69901ea892f9d0e58acb554f4e668
2019-08-20T16:37:00Z
python
2019-09-04T17:34:05Z
lib/ansible/modules/cloud/docker/docker_compose.py
#!/usr/bin/python # # Copyright 2016 Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: docker_compose short_description: Manage multi-container Docker applications with Docker Compose. version_added: "2.1" author: "Chris Houseknecht (@chouseknecht)" description: - Uses Docker Compose to start, shutdown and scale services. - Works with compose versions 1 and 2. - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option. - See the examples for more details. - Supports check mode. - This module was called C(docker_service) before Ansible 2.8. The usage did not change. options: project_src: description: - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file. - Mutually exclusive with I(definition). - Required when no I(definition) is provided. type: path project_name: description: - Provide a project name. If not provided, the project name is taken from the basename of I(project_src). - Required when I(definition) is provided. type: str files: description: - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml). - Files are loaded and merged in the order given. type: list state: description: - Desired state of the project. - Specifying C(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted)). - Specifying C(absent) is the same as running C(docker-compose down). type: str default: present choices: - absent - present services: description: - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. 
C(docker-compose restart) (with I(restarted)) on a subset of services. - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)). type: list scale: description: - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key is the name of the service and the value is an integer count for the number of containers. type: dict dependencies: description: - When I(state) is C(present) specify whether or not to include linked services. type: bool default: yes definition: description: - Compose file describing one or more services, networks and volumes. - Mutually exclusive with I(project_src) and I(files). type: dict hostname_check: description: - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate. type: bool default: no recreate: description: - By default containers will be recreated when their configuration differs from the service definition. - Setting to C(never) ignores configuration differences and leaves existing containers unchanged. - Setting to C(always) forces recreation of all existing containers. type: str default: smart choices: - always - never - smart build: description: - Use with I(state) C(present) to always build images prior to starting the application. - Same as running C(docker-compose build) with the pull option. - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. - Use the I(nocache) option to ignore the image cache when performing the build. - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never). type: bool default: no pull: description: - Use with I(state) C(present) to always pull images prior to starting the application. - Same as running C(docker-compose pull). - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never). 
type: bool default: no version_added: "2.2" nocache: description: - Use with the I(build) option to ignore the cache during the image build process. type: bool default: no version_added: "2.2" remove_images: description: - Use with I(state) C(absent) to remove all images or only local images. type: str choices: - 'all' - 'local' remove_volumes: description: - Use with I(state) C(absent) to remove data volumes. type: bool default: no stopped: description: - Use with I(state) C(present) to stop all containers defined in the Compose file. - If I(services) is defined, only the containers listed there will be stopped. type: bool default: no restarted: description: - Use with I(state) C(present) to restart all containers defined in the Compose file. - If I(services) is defined, only the containers listed there will be restarted. type: bool default: no remove_orphans: description: - Remove containers for services not defined in the Compose file. type: bool default: no timeout: description: - timeout in seconds for container shutdown when attached or when containers are already running. type: int default: 10 extends_documentation_fragment: - docker - docker.docker_py_1_documentation requirements: - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" - "docker-compose >= 1.7.0" - "Docker API >= 1.20" - "PyYAML >= 3.11" ''' EXAMPLES = ''' # Examples use the django example at https://docs.docker.com/compose/django. 
Follow it to create the # flask directory - name: Run using a project directory hosts: localhost gather_facts: no tasks: - name: Tear down existing services docker_compose: project_src: flask state: absent - name: Create and start services docker_compose: project_src: flask register: output - debug: var: output - name: Run `docker-compose up` again docker_compose: project_src: flask build: no register: output - debug: var: output - assert: that: "not output.changed " - name: Stop all services docker_compose: project_src: flask build: no stopped: yes register: output - debug: var: output - assert: that: - "not web.flask_web_1.state.running" - "not db.flask_db_1.state.running" - name: Restart services docker_compose: project_src: flask build: no restarted: yes register: output - debug: var: output - assert: that: - "web.flask_web_1.state.running" - "db.flask_db_1.state.running" - name: Scale the web service to 2 hosts: localhost gather_facts: no tasks: - docker_compose: project_src: flask scale: web: 2 register: output - debug: var: output - name: Run with inline v2 compose hosts: localhost gather_facts: no tasks: - docker_compose: project_src: flask state: absent - docker_compose: project_name: flask definition: version: '2' services: db: image: postgres web: build: "{{ playbook_dir }}/flask" command: "python manage.py runserver 0.0.0.0:8000" volumes: - "{{ playbook_dir }}/flask:/code" ports: - "8000:8000" depends_on: - db register: output - debug: var: output - assert: that: - "web.flask_web_1.state.running" - "db.flask_db_1.state.running" - name: Run with inline v1 compose hosts: localhost gather_facts: no tasks: - docker_compose: project_src: flask state: absent - docker_compose: project_name: flask definition: db: image: postgres web: build: "{{ playbook_dir }}/flask" command: "python manage.py runserver 0.0.0.0:8000" volumes: - "{{ playbook_dir }}/flask:/code" ports: - "8000:8000" links: - db register: output - debug: var: output - assert: that: - 
"web.flask_web_1.state.running" - "db.flask_db_1.state.running" ''' RETURN = ''' services: description: - A dictionary mapping the service's name to a dictionary of containers. - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts are also accessible directly. The service's name is the variable with which the container dictionary can be accessed. Note that the returned facts will be removed in Ansible 2.12. returned: success type: complex contains: container_name: description: Name of the container. Format is C(project_service_#). returned: success type: complex contains: cmd: description: One or more commands to be executed in the container. returned: success type: list example: ["postgres"] image: description: Name of the image from which the container was built. returned: success type: str example: postgres labels: description: Meta data assigned to the container. returned: success type: complex example: {...} networks: description: Contains a dictionary for each network to which the container is a member. returned: success type: complex contains: IPAddress: description: The IP address assigned to the container. returned: success type: str example: 172.17.0.2 IPPrefixLen: description: Number of bits used by the subnet. returned: success type: int example: 16 aliases: description: Aliases assigned to the container by the network. returned: success type: list example: ['db'] globalIPv6: description: IPv6 address assigned to the container. returned: success type: str example: '' globalIPv6PrefixLen: description: IPv6 subnet length. returned: success type: int example: 0 links: description: List of container names to which this container is linked. returned: success type: list example: null macAddress: description: Mac Address assigned to the virtual NIC. returned: success type: str example: "02:42:ac:11:00:02" state: description: Information regarding the current disposition of the container. 
returned: success type: complex contains: running: description: Whether or not the container is up with a running process. returned: success type: bool example: true status: description: Description of the running state. returned: success type: str example: running actions: description: Provides the actions to be taken on each service as determined by compose. returned: when in check mode or I(debug) is C(yes) type: complex contains: service_name: description: Name of the service. returned: always type: complex contains: pulled_image: description: Provides image details when a new image is pulled for the service. returned: on image pull type: complex contains: name: description: name of the image returned: always type: str id: description: image hash returned: always type: str built_image: description: Provides image details when a new image is built for the service. returned: on image build type: complex contains: name: description: name of the image returned: always type: str id: description: image hash returned: always type: str action: description: A descriptive name of the action to be performed on the service's containers. 
returned: always type: list contains: id: description: the container's long ID returned: always type: str name: description: the container's name returned: always type: str short_id: description: the container's short ID returned: always type: str ''' import os import re import sys import tempfile import traceback from contextlib import contextmanager from distutils.version import LooseVersion try: import yaml HAS_YAML = True HAS_YAML_EXC = None except ImportError as dummy: HAS_YAML = False HAS_YAML_EXC = traceback.format_exc() try: from docker.errors import DockerException except ImportError: # missing Docker SDK for Python handled in ansible.module_utils.docker.common pass try: from compose import __version__ as compose_version from compose.cli.command import project_from_options from compose.service import NoSuchImageError from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF HAS_COMPOSE = True HAS_COMPOSE_EXC = None MINIMUM_COMPOSE_VERSION = '1.7.0' except ImportError as dummy: HAS_COMPOSE = False HAS_COMPOSE_EXC = traceback.format_exc() DEFAULT_TIMEOUT = 10 from ansible.module_utils.docker.common import ( AnsibleDockerClient, DockerBaseClass, RequestException, ) AUTH_PARAM_MAPPING = { u'docker_host': u'--host', u'tls': u'--tls', u'cacert_path': u'--tlscacert', u'cert_path': u'--tlscert', u'key_path': u'--tlskey', u'tls_verify': u'--tlsverify' } @contextmanager def stdout_redirector(path_name): old_stdout = sys.stdout fd = open(path_name, 'w') sys.stdout = fd try: yield finally: sys.stdout = old_stdout @contextmanager def stderr_redirector(path_name): old_fh = sys.stderr fd = open(path_name, 'w') sys.stderr = fd try: yield finally: sys.stderr = old_fh def make_redirection_tempfiles(): dummy, out_redir_name = tempfile.mkstemp(prefix="ansible") dummy, err_redir_name = tempfile.mkstemp(prefix="ansible") return (out_redir_name, 
err_redir_name) def cleanup_redirection_tempfiles(out_name, err_name): for i in [out_name, err_name]: os.remove(i) def get_redirected_output(path_name): output = [] with open(path_name, 'r') as fd: for line in fd: # strip terminal format/color chars new_line = re.sub(r'\x1b\[.+m', '', line) output.append(new_line) os.remove(path_name) return output def attempt_extract_errors(exc_str, stdout, stderr): errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')] errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')]) warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')] warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')]) # assume either the exception body (if present) or the last warning was the 'most' # fatal. if exc_str.strip(): msg = exc_str.strip() elif errors: msg = errors[-1].encode('utf-8') else: msg = 'unknown cause' return { 'warnings': [w.encode('utf-8') for w in warnings], 'errors': [e.encode('utf-8') for e in errors], 'msg': msg, 'module_stderr': ''.join(stderr), 'module_stdout': ''.join(stdout) } def get_failure_info(exc, out_name, err_name=None, msg_format='%s'): if err_name is None: stderr = [] else: stderr = get_redirected_output(err_name) stdout = get_redirected_output(out_name) reason = attempt_extract_errors(str(exc), stdout, stderr) reason['msg'] = msg_format % reason['msg'] return reason class ContainerManager(DockerBaseClass): def __init__(self, client): super(ContainerManager, self).__init__() self.client = client self.project_src = None self.files = None self.project_name = None self.state = None self.definition = None self.hostname_check = None self.timeout = None self.remove_images = None self.remove_orphans = None self.remove_volumes = None self.stopped = None self.restarted = None self.recreate = None self.build = None self.dependencies = None self.services = None self.scale = None self.debug = None self.pull = None self.nocache = None for key, value in 
client.module.params.items(): setattr(self, key, value) self.check_mode = client.check_mode if not self.debug: self.debug = client.module._debug self.options = dict() self.options.update(self._get_auth_options()) self.options[u'--skip-hostname-check'] = (not self.hostname_check) if self.project_name: self.options[u'--project-name'] = self.project_name if self.files: self.options[u'--file'] = self.files if not HAS_COMPOSE: self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" % HAS_COMPOSE_EXC) if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION): self.client.fail("Found docker-compose version %s. Minimum required version is %s. " "Upgrade docker-compose to a min version of %s." % (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION)) self.log("options: ") self.log(self.options, pretty_print=True) if self.definition: if not HAS_YAML: self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC) if not self.project_name: self.client.fail("Parameter error - project_name required when providing definition.") self.project_src = tempfile.mkdtemp(prefix="ansible") compose_file = os.path.join(self.project_src, "docker-compose.yml") try: self.log('writing: ') self.log(yaml.dump(self.definition, default_flow_style=False)) with open(compose_file, 'w') as f: f.write(yaml.dump(self.definition, default_flow_style=False)) except Exception as exc: self.client.fail("Error writing to %s - %s" % (compose_file, str(exc))) else: if not self.project_src: self.client.fail("Parameter error - project_src required.") try: self.log("project_src: %s" % self.project_src) self.project = project_from_options(self.project_src, self.options) except Exception as exc: self.client.fail("Configuration error - %s" % str(exc)) def exec_module(self): result = dict() if self.state == 'present': result = self.cmd_up() elif self.state == 'absent': result = self.cmd_down() if self.definition: compose_file 
= os.path.join(self.project_src, "docker-compose.yml") self.log("removing %s" % compose_file) os.remove(compose_file) self.log("removing %s" % self.project_src) os.rmdir(self.project_src) if not self.check_mode and not self.debug and result.get('actions'): result.pop('actions') return result def _get_auth_options(self): options = dict() for key, value in self.client.auth_params.items(): if value is not None: option = AUTH_PARAM_MAPPING.get(key) if option: options[option] = value return options def cmd_up(self): start_deps = self.dependencies service_names = self.services detached = True result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict()) up_options = { u'--no-recreate': False, u'--build': False, u'--no-build': False, u'--no-deps': False, u'--force-recreate': False, } if self.recreate == 'never': up_options[u'--no-recreate'] = True elif self.recreate == 'always': up_options[u'--force-recreate'] = True if self.remove_orphans: up_options[u'--remove-orphans'] = True converge = convergence_strategy_from_opts(up_options) self.log("convergence strategy: %s" % converge) if self.pull: pull_output = self.cmd_pull() result['changed'] = pull_output['changed'] result['actions'] += pull_output['actions'] if self.build: build_output = self.cmd_build() result['changed'] = build_output['changed'] result['actions'] += build_output['actions'] if self.remove_orphans: containers = self.client.containers( filters={ 'label': [ '{0}={1}'.format(LABEL_PROJECT, self.project.name), '{0}={1}'.format(LABEL_ONE_OFF, "False") ], } ) orphans = [] for container in containers: service_name = container.get('Labels', {}).get(LABEL_SERVICE) if service_name not in self.project.service_names: orphans.append(service_name) if orphans: result['changed'] = True for service in self.project.services: if not service_names or service.name in service_names: plan = service.convergence_plan(strategy=converge) if plan.action != 'noop': result['changed'] = True result_action = 
dict(service=service.name) result_action[plan.action] = [] for container in plan.containers: result_action[plan.action].append(dict( id=container.id, name=container.name, short_id=container.short_id, )) result['actions'].append(result_action) if not self.check_mode and result['changed']: out_redir_name, err_redir_name = make_redirection_tempfiles() try: with stdout_redirector(out_redir_name): with stderr_redirector(err_redir_name): do_build = build_action_from_opts(up_options) self.log('Setting do_build to %s' % do_build) self.project.up( service_names=service_names, start_deps=start_deps, strategy=converge, do_build=do_build, detached=detached, remove_orphans=self.remove_orphans, timeout=self.timeout) except Exception as exc: fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, msg_format="Error starting project %s") self.client.fail(**fail_reason) else: cleanup_redirection_tempfiles(out_redir_name, err_redir_name) if self.stopped: stop_output = self.cmd_stop(service_names) result['changed'] = stop_output['changed'] result['actions'] += stop_output['actions'] if self.restarted: restart_output = self.cmd_restart(service_names) result['changed'] = restart_output['changed'] result['actions'] += restart_output['actions'] if self.scale: scale_output = self.cmd_scale() result['changed'] = scale_output['changed'] result['actions'] += scale_output['actions'] for service in self.project.services: service_facts = dict() result['ansible_facts'][service.name] = service_facts result['services'][service.name] = service_facts for container in service.containers(stopped=True): inspection = container.inspect() # pare down the inspection data to the most useful bits facts = dict( cmd=[], labels=dict(), image=None, state=dict( running=None, status=None ), networks=dict() ) if inspection['Config'].get('Cmd', None) is not None: facts['cmd'] = inspection['Config']['Cmd'] if inspection['Config'].get('Labels', None) is not None: facts['labels'] = 
inspection['Config']['Labels'] if inspection['Config'].get('Image', None) is not None: facts['image'] = inspection['Config']['Image'] if inspection['State'].get('Running', None) is not None: facts['state']['running'] = inspection['State']['Running'] if inspection['State'].get('Status', None) is not None: facts['state']['status'] = inspection['State']['Status'] if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'): networks = inspection['NetworkSettings']['Networks'] for key in networks: facts['networks'][key] = dict( aliases=[], globalIPv6=None, globalIPv6PrefixLen=0, IPAddress=None, IPPrefixLen=0, links=None, macAddress=None, ) if networks[key].get('Aliases', None) is not None: facts['networks'][key]['aliases'] = networks[key]['Aliases'] if networks[key].get('GlobalIPv6Address', None) is not None: facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address'] if networks[key].get('GlobalIPv6PrefixLen', None) is not None: facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen'] if networks[key].get('IPAddress', None) is not None: facts['networks'][key]['IPAddress'] = networks[key]['IPAddress'] if networks[key].get('IPPrefixLen', None) is not None: facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen'] if networks[key].get('Links', None) is not None: facts['networks'][key]['links'] = networks[key]['Links'] if networks[key].get('MacAddress', None) is not None: facts['networks'][key]['macAddress'] = networks[key]['MacAddress'] service_facts[container.name] = facts return result def cmd_pull(self): result = dict( changed=False, actions=[], ) if not self.check_mode: for service in self.project.get_services(self.services, include_deps=False): if 'image' not in service.options: continue self.log('Pulling image for service %s' % service.name) # store the existing image ID old_image_id = '' try: image = service.image() if image and image.get('Id'): old_image_id = image['Id'] except 
NoSuchImageError: pass except Exception as exc: self.client.fail("Error: service image lookup failed - %s" % str(exc)) # pull the image try: service.pull(ignore_pull_failures=False) except Exception as exc: self.client.fail("Error: pull failed with %s" % str(exc)) # store the new image ID new_image_id = '' try: image = service.image() if image and image.get('Id'): new_image_id = image['Id'] except NoSuchImageError as exc: self.client.fail("Error: service image lookup failed after pull - %s" % str(exc)) if new_image_id != old_image_id: # if a new image was pulled result['changed'] = True result['actions'].append(dict( service=service.name, pulled_image=dict( name=service.image_name, id=new_image_id ) )) return result def cmd_build(self): result = dict( changed=False, actions=[] ) if not self.check_mode: for service in self.project.get_services(self.services, include_deps=False): if service.can_be_built(): self.log('Building image for service %s' % service.name) # store the existing image ID old_image_id = '' try: image = service.image() if image and image.get('Id'): old_image_id = image['Id'] except NoSuchImageError: pass except Exception as exc: self.client.fail("Error: service image lookup failed - %s" % str(exc)) # build the image try: new_image_id = service.build(pull=self.pull, no_cache=self.nocache) except Exception as exc: self.client.fail("Error: build failed with %s" % str(exc)) if new_image_id not in old_image_id: # if a new image was built result['changed'] = True result['actions'].append(dict( service=service.name, built_image=dict( name=service.image_name, id=new_image_id ) )) return result def cmd_down(self): result = dict( changed=False, actions=[] ) for service in self.project.services: containers = service.containers(stopped=True) if len(containers): result['changed'] = True result['actions'].append(dict( service=service.name, deleted=[container.name for container in containers] )) if not self.check_mode and result['changed']: image_type = 
image_type_from_opt('--rmi', self.remove_images) try: self.project.down(image_type, self.remove_volumes, self.remove_orphans) except Exception as exc: self.client.fail("Error stopping project - %s" % str(exc)) return result def cmd_stop(self, service_names): result = dict( changed=False, actions=[] ) for service in self.project.services: if not service_names or service.name in service_names: service_res = dict( service=service.name, stop=[] ) for container in service.containers(stopped=False): result['changed'] = True service_res['stop'].append(dict( id=container.id, name=container.name, short_id=container.short_id )) result['actions'].append(service_res) if not self.check_mode and result['changed']: out_redir_name, err_redir_name = make_redirection_tempfiles() try: with stdout_redirector(out_redir_name): with stderr_redirector(err_redir_name): self.project.stop(service_names=service_names, timeout=self.timeout) except Exception as exc: fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, msg_format="Error stopping project %s") self.client.fail(**fail_reason) else: cleanup_redirection_tempfiles(out_redir_name, err_redir_name) return result def cmd_restart(self, service_names): result = dict( changed=False, actions=[] ) for service in self.project.services: if not service_names or service.name in service_names: service_res = dict( service=service.name, restart=[] ) for container in service.containers(stopped=True): result['changed'] = True service_res['restart'].append(dict( id=container.id, name=container.name, short_id=container.short_id )) result['actions'].append(service_res) if not self.check_mode and result['changed']: out_redir_name, err_redir_name = make_redirection_tempfiles() try: with stdout_redirector(out_redir_name): with stderr_redirector(err_redir_name): self.project.restart(service_names=service_names, timeout=self.timeout) except Exception as exc: fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, msg_format="Error 
restarting project %s") self.client.fail(**fail_reason) else: cleanup_redirection_tempfiles(out_redir_name, err_redir_name) return result def cmd_scale(self): result = dict( changed=False, actions=[] ) for service in self.project.services: if service.name in self.scale: service_res = dict( service=service.name, scale=0 ) containers = service.containers(stopped=True) scale = self.parse_scale(service.name) if len(containers) != scale: result['changed'] = True service_res['scale'] = scale - len(containers) if not self.check_mode: try: service.scale(scale) except Exception as exc: self.client.fail("Error scaling %s - %s" % (service.name, str(exc))) result['actions'].append(service_res) return result def parse_scale(self, service_name): try: return int(self.scale[service_name]) except ValueError: self.client.fail("Error scaling %s - expected int, got %s", service_name, str(type(self.scale[service_name]))) def main(): argument_spec = dict( project_src=dict(type='path'), project_name=dict(type='str',), files=dict(type='list', elements='path'), state=dict(type='str', default='present', choices=['absent', 'present']), definition=dict(type='dict'), hostname_check=dict(type='bool', default=False), recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']), build=dict(type='bool', default=False), remove_images=dict(type='str', choices=['all', 'local']), remove_volumes=dict(type='bool', default=False), remove_orphans=dict(type='bool', default=False), stopped=dict(type='bool', default=False), restarted=dict(type='bool', default=False), scale=dict(type='dict'), services=dict(type='list', elements='str'), dependencies=dict(type='bool', default=True), pull=dict(type='bool', default=False), nocache=dict(type='bool', default=False), debug=dict(type='bool', default=False), timeout=dict(type='int', default=DEFAULT_TIMEOUT) ) mutually_exclusive = [ ('definition', 'project_src'), ('definition', 'files') ] client = AnsibleDockerClient( argument_spec=argument_spec, 
mutually_exclusive=mutually_exclusive, supports_check_mode=True, min_docker_api_version='1.20', ) if client.module._name == 'docker_service': client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.", version='2.12') try: result = ContainerManager(client).exec_module() client.module.exit_json(**result) except DockerException as e: client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) except RequestException as e: client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,459
Refine CSS for docsite, prepare for future theme upgrades
##### SUMMARY Follow-up to #61189 and #58832. Make sure all Ansible-specific CSS is separated from the underlying Sphinx theme. Then re-minify the CSS for the docsite. ##### ISSUE TYPE - Documentation Report ##### COMPONENT NAME docs.ansible.comb ##### ANSIBLE VERSION 2.9 ##### CONFIGURATION N/A ##### OS / ENVIRONMENT N/A
https://github.com/ansible/ansible/issues/61459
https://github.com/ansible/ansible/pull/61792
736938625bdd686739b7326f054b29adafefd71a
7efaad711e769f1f4ea04e763d7c5796324e1db9
2019-08-28T15:50:30Z
python
2019-09-04T17:57:05Z
docs/docsite/_static/ansible.css
/* Fix for read the docs theme: * https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html */ /* override table width restrictions */ @media screen and (min-width: 767px) { /* If we ever publish to read the docs, we need to use !important for these * two styles as read the docs itself loads their theme in a way that we * can't otherwise override it. */ .wy-table-responsive table td { white-space: normal; } .wy-table-responsive { overflow: visible; } } /* * We use the class documentation-table for attribute tables where the first * column is the name of an attribute and the second column is the description. */ /* These tables look like this: * * Attribute Name Description * -------------- ----------- * **NAME** This is a multi-line description * str/required that can span multiple lines * * With multiple paragraphs * -------------- ----------- * * **NAME** is given the class .value-name * str is given the class .value-type * / is given the class .value-separator * required is given the class .value-required */ /* The extra .rst-content is so this will override rtd theme */ .rst-content table.documentation-table td { vertical-align: top; } table.documentation-table td:first-child { white-space: nowrap; vertical-align: top; } table.documentation-table td:first-child p:first-child { font-weight: bold; display: inline; } /* This is now redundant with above position-based styling */ /* table.documentation-table .value-name { font-weight: bold; display: inline; } */ table.documentation-table .value-type { font-size: x-small; color: purple; display: inline; } table.documentation-table .value-separator { font-size: x-small; display: inline; } table.documentation-table .value-required { font-size: x-small; color: red; display: inline; } /* Ansible-specific CSS pulled out of rtd theme for 2.9 */ .DocSiteProduct-header { flex: 1; -webkit-flex: 1; padding: 20px; padding-top: 10px; padding-bottom: 20px; display: flex; display: -webkit-flex; flex-direction: column; 
-webkit-flex-direction: column; align-items: center; -webkit-align-items: center; justify-content: flex-start; -webkit-justify-content: flex-start; margin-left: 20px; margin-right: 20px; text-decoration: none; font-weight: 400; font-family: 'Open Sans', sans-serif; } .DocSiteProduct-header:active, .DocSiteProduct-header:focus { color: #fff; } .DocSiteProduct-header:visited { color: #fff; } .DocSiteProduct-header--core { font-size: 25px; background-color: #5bbdbf; border: 2px solid #5bbdbf; border-top-left-radius: 4px; border-top-right-radius: 4px; color: #fff; padding-left: 2px; margin-left: 2px; } .DocSiteProduct-headerAlign { width: 100%; } .DocSiteProduct-logo { width: 60px; height: 60px; margin-bottom: -9px; } .DocSiteProduct-logoText { margin-top: 6px; font-size: 25px; text-align: left; } .DocSiteProduct-CheckVersionPara { margin-left: 2px; padding-bottom: 4px; margin-right: 2px; margin-bottom: 10px; } /* Ansible color scheme */ .wy-side-nav-search { background-color: #5bbdbf; } .wy-nav-top { background-color: #5bbdbf; } .wy-menu-vertical header, .wy-menu-vertical p.caption { color: #5bbdbf; } .wy-menu-vertical a { padding: 0; } .wy-menu-vertical a.reference.internal { padding: 0.4045em 1.618em; } /* Override sphinx rtd theme max-with of 800px */ .wy-nav-content { max-width: 100%; } /* Override sphinx_rtd_theme - keeps left-nav from overwriting Documentation title */ .wy-nav-side { top: 45px; } .wy-grid-for-nav { /* Ansible - changed to relative to remove extraneous side scroll bar */ position: relative; } /* Ansible - remove so highlight indenting is correct */ .rst-content .highlighted { padding: 0 0px; } .DocSiteBanner { display: flex; display: -webkit-flex; justify-content: center; -webkit-justify-content: center; flex-wrap: wrap; -webkit-flex-wrap: wrap; margin-bottom: 25px; } .DocSiteBanner-imgWrapper { max-width: 100%; } th, td { min-width: 100px; } table { overflow-x: auto; display: block; max-width: 100%; } .documentation-table td.elbow-placeholder { 
border-left: 1px solid #000; border-top: 0px; width: 30px; min-width: 30px; } .documentation-table th, .documentation-table td { padding: 4px; border-left: 1px solid #000; border-top: 1px solid #000; } .documentation-table { border-right: 1px solid #000; border-bottom: 1px solid #000; } @media print { * { background: transparent !important; color: black !important; text-shadow: none !important; filter:none !important; -ms-filter: none !important; } #nav, a, a:visited { text-decoration: underline; } a[href]:after { content: " (" attr(href) ")"; } abbr[title]:after { content: " (" attr(title) ")"; } .ir a:after, a[href^="javascript:"]:after, a[href^="#"]:after { content: ""; } /* Don't show links for images, or javascript/internal links */ pre, blockquote { border: 0px solid #999; page-break-inside: avoid; } thead { display: table-header-group; } /* h5bp.com/t */ tr, img { page-break-inside: avoid; } img { max-width: 100% !important; } @page { margin: 0.5cm; } p, h2, h3 { orphans: 3; widows: 3; } h2, h3 { page-break-after: avoid; } .DocSiteBanner, #google_image_div { display: none !important; } } #sideBanner{ display: none; } .DocSite-globalNav { display: none; } .DocSite-sideNav { display: block; margin-bottom: 40px; } .DocSite-nav { display: none; } .ansibleNav { background: #000; padding: 0px 20px; width: auto; border-bottom: 1px solid #444; font-size: 14px; z-index: 1 } .ansibleNav ul { list-style: none; padding-left: 0px; margin-top: 0px; } .ansibleNav ul li{ padding: 7px 0px; border-bottom: 1px solid #444; } .ansibleNav ul li:last-child{ border: none; } .ansibleNav ul li a { color: #fff; text-decoration: none; text-transform: uppercase; padding: 6px 0px; } .ansibleNav ul li a:hover { color: #5bbdbf ; background: transparent; } @media screen and (min-width: 768px) { .DocSite-globalNav{ display: block; position: fixed; } #sideBanner{ display: block; } .DocSite-sideNav{ display: none; } .DocSite-nav { flex: initial; -webkit-flex: initial; display: flex; display: 
-webkit-flex; flex-direction: row; -webkit-flex-direction: row; justify-content: flex-start; -webkit-justify-content: flex-start; padding: 15px; background-color: #000; text-decoration: none; font-family: 'Open Sans', sans-serif; } .DocSiteNav-logo { width: 28px; height: 28px; margin-right: 8px; margin-top: -6px; position: fixed; z-index: 1; } .DocSiteNav-title { color: #fff; font-size: 20px; position: fixed; margin-left: 40px; margin-top: -4px; z-index: 1; } .ansibleNav{ height: 45px; width: 100%; font-size: 13px; padding: 0px 60px 0px 0px; } .ansibleNav ul { float: right; display: flex; flex-wrap: nowrap; margin-top: 13px; } .ansibleNav ul li{ padding: 0px; border-bottom: none; } .ansibleNav ul li a { color: #fff; text-decoration: none; text-transform: uppercase; padding: 8px 13px; } } @media screen and (min-width: 768px) { .DocSite-globalNav{ display: block } #sideBanner{ display: block; } .DocSite-sideNav{ display: none; } .DocSite-nav { flex: initial; -webkit-flex: initial; display: flex; display: -webkit-flex; flex-direction: row; -webkit-flex-direction: row; justify-content: flex-start; -webkit-justify-content: flex-start; padding: 15px; background-color: #000; text-decoration: none; font-family: 'Open Sans', sans-serif; } .DocSiteNav-logo { width: 28px; height: 28px; margin-right: 8px; margin-top: -6px; position: fixed; } .DocSiteNav-title { color: #fff; font-size: 20px; position: fixed; margin-left: 40px; margin-top: -4px; } .ansibleNav{ height: 45px; font-size: 13px; padding: 0px 60px 0px 0px; } .ansibleNav ul { float: right; display: flex; flex-wrap: nowrap; margin-top: 13px; } .ansibleNav ul li{ padding: 0px; border-bottom: none; } .ansibleNav ul li a { color: #fff; text-decoration: none; text-transform: uppercase; padding: 8px 13px; } }
closed
ansible/ansible
https://github.com/ansible/ansible
61,624
ansible-galaxy 2.9 role install truncates API URL
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY When trying to install a role, install fails and the role is not found. Investigation shows the role API URL being used to lookup the role information is being truncated in `lib/ansible/galaxy/api.py` line 274, potentially due to the change from `rstrip()` to `strip()` in `_urljoin()`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ansible-galaxy ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` - Try to install any role from Galaxy <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The role to be located and installed from Galaxy ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> The role is not found, even though it clearly exists. <!--- Paste verbatim command output between quotes --> ```paste below (orion) [calvin@localhost ansible]$ ansible-galaxy install geerlingguy.nginx - downloading role 'nginx', owned by geerlingguy [WARNING]: - geerlingguy.nginx was NOT installed successfully: - sorry, geerlingguy.nginx was not found on https://galaxy.ansible.com. ``` Additional evidence of where the problem comes from internally: ``` (orion) [calvin@localhost ansible]$ ansible-galaxy -vvv install --server=http://galaxy.ansible.com geerlingguy.docker [DEPRECATION WARNING]: Setting verbosity before the arg sub command is deprecated, set the verbosity after the sub command. This feature will be removed in version 2.13. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ansible-galaxy 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible-galaxy python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] Using /etc/ansible/ansible.cfg as config file Processing role geerlingguy.docker - downloading role 'docker', owned by geerlingguy > /home/calvin/projects/ansible/lib/ansible/galaxy/api.py(274)lookup_role_by_name() -> url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", (Pdb) l 269 display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) 270 except Exception: 271 raise AnsibleError("Invalid role name (%s). 
Specify role as format: username.rolename" % role_name) 272 273 import pdb; pdb.set_trace() 274 -> url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", 275 "?owner__username=%s&name=%s" % (user_name, role_name))[:-1] 276 data = self._call_galaxy(url) 277 if len(data["results"]) != 0: 278 return data["results"][0] 279 return None (Pdb) n > /home/calvin/projects/ansible/lib/ansible/galaxy/api.py(275)lookup_role_by_name() -> "?owner__username=%s&name=%s" % (user_name, role_name))[:-1] (Pdb) n > /home/calvin/projects/ansible/lib/ansible/galaxy/api.py(276)lookup_role_by_name() -> data = self._call_galaxy(url) (Pdb) !url 'https://galaxy.ansible.com/api/v1/roles/?owner__username=geerlingguy&name=docke' ``` Note the truncated URL on the last line of this snippet
https://github.com/ansible/ansible/issues/61624
https://github.com/ansible/ansible/pull/61775
adfaefb7321616da2975c95c87a4973d77aea5c4
8214d188cf330477ef4e4c2f3cc38826fc5b652c
2019-08-30T19:08:12Z
python
2019-09-04T21:06:42Z
changelogs/fragments/61624-fix-galaxy-url-building.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,624
ansible-galaxy 2.9 role install truncates API URL
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY When trying to install a role, install fails and the role is not found. Investigation shows the role API URL being used to lookup the role information is being truncated in `lib/ansible/galaxy/api.py` line 274, potentially due to the change from `rstrip()` to `strip()` in `_urljoin()`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ansible-galaxy ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` - Try to install any role from Galaxy <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The role to be located and installed from Galaxy ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> The role is not found, even though it clearly exists. <!--- Paste verbatim command output between quotes --> ```paste below (orion) [calvin@localhost ansible]$ ansible-galaxy install geerlingguy.nginx - downloading role 'nginx', owned by geerlingguy [WARNING]: - geerlingguy.nginx was NOT installed successfully: - sorry, geerlingguy.nginx was not found on https://galaxy.ansible.com. ``` Additional evidence of where the problem comes from internally: ``` (orion) [calvin@localhost ansible]$ ansible-galaxy -vvv install --server=http://galaxy.ansible.com geerlingguy.docker [DEPRECATION WARNING]: Setting verbosity before the arg sub command is deprecated, set the verbosity after the sub command. This feature will be removed in version 2.13. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ansible-galaxy 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible-galaxy python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] Using /etc/ansible/ansible.cfg as config file Processing role geerlingguy.docker - downloading role 'docker', owned by geerlingguy > /home/calvin/projects/ansible/lib/ansible/galaxy/api.py(274)lookup_role_by_name() -> url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", (Pdb) l 269 display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) 270 except Exception: 271 raise AnsibleError("Invalid role name (%s). 
Specify role as format: username.rolename" % role_name) 272 273 import pdb; pdb.set_trace() 274 -> url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", 275 "?owner__username=%s&name=%s" % (user_name, role_name))[:-1] 276 data = self._call_galaxy(url) 277 if len(data["results"]) != 0: 278 return data["results"][0] 279 return None (Pdb) n > /home/calvin/projects/ansible/lib/ansible/galaxy/api.py(275)lookup_role_by_name() -> "?owner__username=%s&name=%s" % (user_name, role_name))[:-1] (Pdb) n > /home/calvin/projects/ansible/lib/ansible/galaxy/api.py(276)lookup_role_by_name() -> data = self._call_galaxy(url) (Pdb) !url 'https://galaxy.ansible.com/api/v1/roles/?owner__username=geerlingguy&name=docke' ``` Note the truncated URL on the last line of this snippet
https://github.com/ansible/ansible/issues/61624
https://github.com/ansible/ansible/pull/61775
adfaefb7321616da2975c95c87a4973d77aea5c4
8214d188cf330477ef4e4c2f3cc38826fc5b652c
2019-08-30T19:08:12Z
python
2019-09-04T21:06:42Z
lib/ansible/galaxy/api.py
# (C) 2013, James Cammarata <[email protected]> # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import base64 import json import os import tarfile import uuid import time from ansible import context from ansible.errors import AnsibleError from ansible.module_utils.six import string_types from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.urls import open_url from ansible.utils.display import Display from ansible.utils.hashing import secure_hash_s display = Display() def g_connect(versions): """ Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the endpoint. :param versions: A list of API versions that the function supports. """ def decorator(method): def wrapped(self, *args, **kwargs): if not self._available_api_versions: display.vvvv("Initial connection to galaxy_server: %s" % self.api_server) # Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer # auth for Automation Hub. 
n_url = _urljoin(self.api_server, 'api') error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url) try: data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg) except GalaxyError as e: if e.http_code != 401: raise # Assume this is v3 (Automation Hub) and auth is required headers = {} self._add_auth_token(headers, n_url, token_type='Bearer', required=True) data = self._call_galaxy(n_url, headers=headers, method='GET', error_context_msg=error_context_msg) # Default to only supporting v1, if only v1 is returned we also assume that v2 is available even though # it isn't returned in the available_versions dict. available_versions = data.get('available_versions', {u'v1': u'/api/v1'}) if list(available_versions.keys()) == [u'v1']: available_versions[u'v2'] = u'/api/v2' self._available_api_versions = available_versions display.vvvv("Found API version '%s' with Galaxy server %s (%s)" % (', '.join(available_versions.keys()), self.name, self.api_server)) # Verify that the API versions the function works with are available on the server specified. available_versions = set(self._available_api_versions.keys()) common_versions = set(versions).intersection(available_versions) if not common_versions: raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s" % (method.__name__, ", ".join(versions), ", ".join(available_versions), self.name, self.api_server)) return method(self, *args, **kwargs) return wrapped return decorator def _urljoin(*args): return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a) class GalaxyError(AnsibleError): """ Error for bad Galaxy server responses. 
""" def __init__(self, http_error, message): super(GalaxyError, self).__init__(message) self.http_code = http_error.code self.url = http_error.geturl() try: http_msg = to_text(http_error.read()) err_info = json.loads(http_msg) except (AttributeError, ValueError): err_info = {} url_split = self.url.split('/') if 'v2' in url_split: galaxy_msg = err_info.get('message', 'Unknown error returned by Galaxy server.') code = err_info.get('code', 'Unknown') full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code) elif 'v3' in url_split: errors = err_info.get('errors', []) if not errors: errors = [{}] # Defaults are set below, we just need to make sure 1 error is present. message_lines = [] for error in errors: error_msg = error.get('detail') or error.get('title') or 'Unknown error returned by Galaxy server.' error_code = error.get('code') or 'Unknown' message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code) message_lines.append(message_line) full_error_msg = "%s %s" % (message, ', '.join(message_lines)) else: # v1 and unknown API endpoints galaxy_msg = err_info.get('default', 'Unknown error returned by Galaxy server.') full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg) self.message = to_native(full_error_msg) class CollectionVersionMetadata: def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies): """ Contains common information about a collection on a Galaxy server to smooth through API differences for Collection and define a standard meta info for a collection. :param namespace: The namespace name. :param name: The collection name. :param version: The version that the metadata refers to. :param download_url: The URL to download the collection. :param artifact_sha256: The SHA256 of the collection artifact for later verification. :param dependencies: A dict of dependencies of the collection. 
""" self.namespace = namespace self.name = name self.version = version self.download_url = download_url self.artifact_sha256 = artifact_sha256 self.dependencies = dependencies class GalaxyAPI: """ This class is meant to be used as a API client for an Ansible Galaxy server """ def __init__(self, galaxy, name, url, username=None, password=None, token=None): self.galaxy = galaxy self.name = name self.username = username self.password = password self.token = token self.api_server = url self.validate_certs = not context.CLIARGS['ignore_certs'] self._available_api_versions = {} display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs)) @property @g_connect(['v1', 'v2', 'v3']) def available_api_versions(self): # Calling g_connect will populate self._available_api_versions return self._available_api_versions def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None): headers = headers or {} self._add_auth_token(headers, url, required=auth_required) try: display.vvvv("Calling Galaxy at %s" % url) resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers, method=method, timeout=20, unredirected_headers=['Authorization']) except HTTPError as e: raise GalaxyError(e, error_context_msg) except Exception as e: raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e))) resp_data = to_text(resp.read(), errors='surrogate_or_strict') try: data = json.loads(resp_data) except ValueError: raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s" % (resp.url, to_native(resp_data))) return data def _add_auth_token(self, headers, url, token_type=None, required=False): # Don't add the auth token if one is already present if 'Authorization' in headers: return token = self.token.get() if self.token else None # 'Token' for v2 api, 'Bearer' for v3 but still allow someone to override the token if necessary. 
is_v3 = 'v3' in url.split('/') token_type = token_type or ('Bearer' if is_v3 else 'Token') if token: headers['Authorization'] = '%s %s' % (token_type, token) elif self.username: token = "%s:%s" % (to_text(self.username, errors='surrogate_or_strict'), to_text(self.password, errors='surrogate_or_strict', nonstring='passthru') or '') b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict')) headers['Authorization'] = 'Basic %s' % to_text(b64_val) elif required: raise AnsibleError("No access token or username set. A token can be set with --api-key, with " "'ansible-galaxy login', or set in ansible.cfg.") @g_connect(['v1']) def authenticate(self, github_token): """ Retrieve an authentication token """ url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/' args = urlencode({"github_token": github_token}) resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST") data = json.loads(to_text(resp.read(), errors='surrogate_or_strict')) return data @g_connect(['v1']) def create_import_task(self, github_user, github_repo, reference=None, role_name=None): """ Post an import request """ url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/' args = { "github_user": github_user, "github_repo": github_repo, "github_reference": reference if reference else "" } if role_name: args['alternate_role_name'] = role_name elif github_repo.startswith('ansible-role'): args['alternate_role_name'] = github_repo[len('ansible-role') + 1:] data = self._call_galaxy(url, args=urlencode(args), method="POST") if data.get('results', None): return data['results'] return data @g_connect(['v1']) def get_import_task(self, task_id=None, github_user=None, github_repo=None): """ Check the status of an import task. 
""" url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") if task_id is not None: url = "%s?id=%d" % (url, task_id) elif github_user is not None and github_repo is not None: url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo) else: raise AnsibleError("Expected task_id or github_user and github_repo") data = self._call_galaxy(url) return data['results'] @g_connect(['v1']) def lookup_role_by_name(self, role_name, notify=True): """ Find a role by name. """ role_name = to_text(urlquote(to_bytes(role_name))) try: parts = role_name.split(".") user_name = ".".join(parts[0:-1]) role_name = parts[-1] if notify: display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) except Exception: raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name) url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", "?owner__username=%s&name=%s" % (user_name, role_name))[:-1] data = self._call_galaxy(url) if len(data["results"]) != 0: return data["results"][0] return None @g_connect(['v1']) def fetch_role_related(self, related, role_id): """ Fetch the list of related items for the given role. The url comes from the 'related' field of the role. """ results = [] try: url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related, "?page_size=50")[:-1] data = self._call_galaxy(url) results = data['results'] done = (data.get('next_link', None) is None) while not done: url = _urljoin(self.api_server, data['next_link']) data = self._call_galaxy(url) results += data['results'] done = (data.get('next_link', None) is None) except Exception as e: display.vvvv("Unable to retrive role (id=%s) data (%s), but this is not fatal so we continue: %s" % (role_id, related, to_text(e))) return results @g_connect(['v1']) def get_list(self, what): """ Fetch the list of items specified. 
""" try: url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")[:-1] data = self._call_galaxy(url) if "results" in data: results = data['results'] else: results = data done = True if "next" in data: done = (data.get('next_link', None) is None) while not done: url = _urljoin(self.api_server, data['next_link']) data = self._call_galaxy(url) results += data['results'] done = (data.get('next_link', None) is None) return results except Exception as error: raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error))) @g_connect(['v1']) def search_roles(self, search, **kwargs): search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")[:-1] if search: search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search))) tags = kwargs.get('tags', None) platforms = kwargs.get('platforms', None) page_size = kwargs.get('page_size', None) author = kwargs.get('author', None) if tags and isinstance(tags, string_types): tags = tags.split(',') search_url += '&tags_autocomplete=' + '+'.join(tags) if platforms and isinstance(platforms, string_types): platforms = platforms.split(',') search_url += '&platforms_autocomplete=' + '+'.join(platforms) if page_size: search_url += '&page_size=%s' % page_size if author: search_url += '&username_autocomplete=%s' % author data = self._call_galaxy(search_url) return data @g_connect(['v1']) def add_secret(self, source, github_user, github_repo, secret): url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/' args = urlencode({ "source": source, "github_user": github_user, "github_repo": github_repo, "secret": secret }) data = self._call_galaxy(url, args=args, method="POST") return data @g_connect(['v1']) def list_secrets(self): url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") data = self._call_galaxy(url, auth_required=True) return data @g_connect(['v1']) def 
remove_secret(self, secret_id): url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/' data = self._call_galaxy(url, auth_required=True, method='DELETE') return data @g_connect(['v1']) def delete_role(self, github_user, github_repo): url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole", "?github_user=%s&github_repo=%s" % (github_user, github_repo))[:-1] data = self._call_galaxy(url, auth_required=True, method='DELETE') return data # Collection APIs # @g_connect(['v2', 'v3']) def publish_collection(self, collection_path): """ Publishes a collection to a Galaxy server and returns the import task URI. :param collection_path: The path to the collection tarball to publish. :return: The import task URI that contains the import results. """ display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server)) b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict') if not os.path.exists(b_collection_path): raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path)) elif not tarfile.is_tarfile(b_collection_path): raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection " "build' to create a proper release artifact." 
% to_native(collection_path)) with open(b_collection_path, 'rb') as collection_tar: data = collection_tar.read() boundary = '--------------------------%s' % uuid.uuid4().hex b_file_name = os.path.basename(b_collection_path) part_boundary = b"--" + to_bytes(boundary, errors='surrogate_or_strict') form = [ part_boundary, b"Content-Disposition: form-data; name=\"sha256\"", to_bytes(secure_hash_s(data), errors='surrogate_or_strict'), part_boundary, b"Content-Disposition: file; name=\"file\"; filename=\"%s\"" % b_file_name, b"Content-Type: application/octet-stream", b"", data, b"%s--" % part_boundary, ] data = b"\r\n".join(form) headers = { 'Content-type': 'multipart/form-data; boundary=%s' % boundary, 'Content-length': len(data), } if 'v3' in self.available_api_versions: n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/' else: n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/' resp = self._call_galaxy(n_url, args=data, headers=headers, method='POST', auth_required=True, error_context_msg='Error when publishing collection to %s (%s)' % (self.name, self.api_server)) return resp['task'] @g_connect(['v2', 'v3']) def wait_import_task(self, task_url, timeout=0): """ Waits until the import process on the Galaxy server has completed or the timeout is reached. :param task_url: The full URI of the import task to wait for, this is returned by publish_collection. :param timeout: The timeout in seconds, 0 is no timeout. """ # TODO: actually verify that v3 returns the same structure as v2, right now this is just an assumption. 
state = 'waiting' data = None display.display("Waiting until Galaxy import task %s has completed" % task_url) start = time.time() wait = 2 while timeout == 0 or (time.time() - start) < timeout: data = self._call_galaxy(task_url, method='GET', auth_required=True, error_context_msg='Error when getting import task results at %s' % task_url) state = data.get('state', 'waiting') if data.get('finished_at', None): break display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again' % (state, wait)) time.sleep(wait) # poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds. wait = min(30, wait * 1.5) if state == 'waiting': raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % to_native(task_url)) for message in data.get('messages', []): level = message['level'] if level == 'error': display.error("Galaxy import error message: %s" % message['message']) elif level == 'warning': display.warning("Galaxy import warning message: %s" % message['message']) else: display.vvv("Galaxy import message: %s - %s" % (level, message['message'])) if state == 'failed': code = to_native(data['error'].get('code', 'UNKNOWN')) description = to_native( data['error'].get('description', "Unknown error, see %s for more details" % task_url)) raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code)) @g_connect(['v2', 'v3']) def get_collection_version_metadata(self, namespace, name, version): """ Gets the collection information from the Galaxy server about a specific Collection version. :param namespace: The collection namespace. :param name: The collection name. :param version: Optional version of the collection to get the information for. :return: CollectionVersionMetadata about the collection at the version requested. 
""" api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2')) url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version] n_collection_url = _urljoin(*url_paths) error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \ % (namespace, name, version, self.name, self.api_server) data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg) return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'], data['download_url'], data['artifact']['sha256'], data['metadata']['dependencies']) @g_connect(['v2', 'v3']) def get_collection_versions(self, namespace, name): """ Gets a list of available versions for a collection on a Galaxy server. :param namespace: The collection namespace. :param name: The collection name. :return: A list of versions that are available. """ if 'v3' in self.available_api_versions: api_path = self.available_api_versions['v3'] results_key = 'data' pagination_path = ['links', 'next'] else: api_path = self.available_api_versions['v2'] results_key = 'results' pagination_path = ['next'] n_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions') error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \ % (namespace, name, self.name, self.api_server) data = self._call_galaxy(n_url, error_context_msg=error_context_msg) versions = [] while True: versions += [v['version'] for v in data[results_key]] next_link = data for path in pagination_path: next_link = next_link.get(path, {}) if not next_link: break data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'), error_context_msg=error_context_msg) return versions
closed
ansible/ansible
https://github.com/ansible/ansible
61,609
ansible-galaxy 2.9 role install ignores --server option
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Role install in the current `stable-2.9` branch attempts to install from `http://galaxy.ansible.com/` ignoring anything passed to `--server`. Other role commands, like `import`, seem to respect the server parameter properly. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Fedora 29 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Expect the given role to be installed from the specified Galaxy server, rather than the default production instance. Especially for automated tests which we do not want to run against production. 
##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below (orion) [calvin@localhost ansible]$ ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash [DEPRECATION WARNING]: Setting verbosity before the arg sub command is deprecated, set the verbosity after the sub command. This feature will be removed in version 2.13. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ansible-galaxy 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible-galaxy python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] Using /etc/ansible/ansible.cfg as config file Processing role orionuser1.ansible_role_logstash - downloading role 'ansible_role_logstash', owned by orionuser1 [WARNING]: - orionuser1.ansible_role_logstash was NOT installed successfully: - sorry, orionuser1.ansible_role_logstash was not found on https://galaxy.ansible.com. ERROR! - you can use --ignore-errors to skip failed roles and finish processing the list. ```
https://github.com/ansible/ansible/issues/61609
https://github.com/ansible/ansible/pull/61820
f81b7dd10a674c72659b25be20461865bb997eed
3a7b77a94ce534c502828ebd2959f6fdf9d183f5
2019-08-30T16:03:22Z
python
2019-09-05T01:46:44Z
changelogs/fragments/ansible-galaxy-role-server.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,609
ansible-galaxy 2.9 role install ignores --server option
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Role install in the current `stable-2.9` branch attempts to install from `http://galaxy.ansible.com/` ignoring anything passed to `--server`. Other role commands, like `import`, seem to respect the server parameter properly. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Fedora 29 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Expect the given role to be installed from the specified Galaxy server, rather than the default production instance. Especially for automated tests which we do not want to run against production. 
##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below (orion) [calvin@localhost ansible]$ ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash [DEPRECATION WARNING]: Setting verbosity before the arg sub command is deprecated, set the verbosity after the sub command. This feature will be removed in version 2.13. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ansible-galaxy 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible-galaxy python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] Using /etc/ansible/ansible.cfg as config file Processing role orionuser1.ansible_role_logstash - downloading role 'ansible_role_logstash', owned by orionuser1 [WARNING]: - orionuser1.ansible_role_logstash was NOT installed successfully: - sorry, orionuser1.ansible_role_logstash was not found on https://galaxy.ansible.com. ERROR! - you can use --ignore-errors to skip failed roles and finish processing the list. ```
https://github.com/ansible/ansible/issues/61609
https://github.com/ansible/ansible/pull/61820
f81b7dd10a674c72659b25be20461865bb997eed
3a7b77a94ce534c502828ebd2959f6fdf9d183f5
2019-08-30T16:03:22Z
python
2019-09-05T01:46:44Z
lib/ansible/cli/galaxy.py
# Copyright: (c) 2013, James Cammarata <[email protected]> # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os.path import re import shutil import textwrap import time import yaml from jinja2 import BaseLoader, Environment, FileSystemLoader from yaml.error import YAMLError import ansible.constants as C from ansible import context from ansible.cli import CLI from ansible.cli.arguments import option_helpers as opt_help from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.collection import build_collection, install_collections, publish_collection, \ validate_collection_name from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.role import GalaxyRole from ansible.galaxy.token import GalaxyToken, NoTokenSentinel from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.parsing.yaml.loader import AnsibleLoader from ansible.playbook.role.requirement import RoleRequirement from ansible.utils.display import Display from ansible.utils.plugin_docs import get_versioned_doclink display = Display() class GalaxyCLI(CLI): '''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.''' SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url") def __init__(self, args): # Inject role into sys.argv[1] as a backwards compatibility step if len(args) > 1 and args[1] not in ['-h', '--help'] and 'role' not in args and 'collection' not in args: # TODO: Should we add a warning here and eventually deprecate the implicit role 
subcommand choice # Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy. idx = 2 if args[1].startswith('-v') else 1 args.insert(idx, 'role') self.api_servers = [] self.galaxy = None super(GalaxyCLI, self).__init__(args) def init_parser(self): ''' create an options parser for bin/ansible ''' super(GalaxyCLI, self).init_parser( desc="Perform various Role and Collection related operations.", ) # Common arguments that apply to more than 1 action common = opt_help.argparse.ArgumentParser(add_help=False) common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL') common.add_argument('--api-key', dest='api_key', help='The Ansible Galaxy API key which can be found at ' 'https://galaxy.ansible.com/me/preferences. You can also use ansible-galaxy login to ' 'retrieve this key or set the token for the GALAXY_SERVER_LIST entry.') common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.') opt_help.add_verbosity_options(common) force = opt_help.argparse.ArgumentParser(add_help=False) force.add_argument('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role or collection') github = opt_help.argparse.ArgumentParser(add_help=False) github.add_argument('github_user', help='GitHub username') github.add_argument('github_repo', help='GitHub repository') offline = opt_help.argparse.ArgumentParser(add_help=False) offline.add_argument('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles") default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '') roles_path = opt_help.argparse.ArgumentParser(add_help=False) roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True), default=C.DEFAULT_ROLES_PATH, 
action=opt_help.PrependListAction, help='The path to the directory containing your roles. The default is the first ' 'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path) # Add sub parser for the Galaxy role type (role or collection) type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type') type_parser.required = True # Add sub parser for the Galaxy collection actions collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.') collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action') collection_parser.required = True self.add_init_options(collection_parser, parents=[common, force]) self.add_build_options(collection_parser, parents=[common, force]) self.add_publish_options(collection_parser, parents=[common]) self.add_install_options(collection_parser, parents=[common, force]) # Add sub parser for the Galaxy role actions role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.') role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action') role_parser.required = True self.add_init_options(role_parser, parents=[common, force, offline]) self.add_remove_options(role_parser, parents=[common, roles_path]) self.add_delete_options(role_parser, parents=[common, github]) self.add_list_options(role_parser, parents=[common, roles_path]) self.add_search_options(role_parser, parents=[common]) self.add_import_options(role_parser, parents=[common, github]) self.add_setup_options(role_parser, parents=[common, roles_path]) self.add_login_options(role_parser, parents=[common]) self.add_info_options(role_parser, parents=[common, roles_path, offline]) self.add_install_options(role_parser, parents=[common, force, roles_path]) def add_init_options(self, parser, parents=None): galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role' init_parser = parser.add_parser('init', parents=parents, help='Initialize new {0} with the base structure of 
a ' '{0}.'.format(galaxy_type)) init_parser.set_defaults(func=self.execute_init) init_parser.add_argument('--init-path', dest='init_path', default='./', help='The path in which the skeleton {0} will be created. The default is the ' 'current working directory.'.format(galaxy_type)) init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type), default=C.GALAXY_ROLE_SKELETON, help='The path to a {0} skeleton that the new {0} should be based ' 'upon.'.format(galaxy_type)) obj_name_kwargs = {} if galaxy_type == 'collection': obj_name_kwargs['type'] = validate_collection_name init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()), **obj_name_kwargs) if galaxy_type == 'role': init_parser.add_argument('--type', dest='role_type', action='store', default='default', help="Initialize using an alternate role type. Valid types include: 'container', " "'apb' and 'network'.") def add_remove_options(self, parser, parents=None): remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.') remove_parser.set_defaults(func=self.execute_remove) remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+') def add_delete_options(self, parser, parents=None): delete_parser = parser.add_parser('delete', parents=parents, help='Removes the role from Galaxy. 
It does not remove or alter the actual ' 'GitHub repository.') delete_parser.set_defaults(func=self.execute_delete) def add_list_options(self, parser, parents=None): list_parser = parser.add_parser('list', parents=parents, help='Show the name and version of each role installed in the roles_path.') list_parser.set_defaults(func=self.execute_list) list_parser.add_argument('role', help='Role', nargs='?', metavar='role') def add_search_options(self, parser, parents=None): search_parser = parser.add_parser('search', parents=parents, help='Search the Galaxy database by tags, platforms, author and multiple ' 'keywords.') search_parser.set_defaults(func=self.execute_search) search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by') search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by') search_parser.add_argument('--author', dest='author', help='GitHub username') search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*') def add_import_options(self, parser, parents=None): import_parser = parser.add_parser('import', parents=parents, help='Import a role') import_parser.set_defaults(func=self.execute_import) import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True, help="Don't wait for import results.") import_parser.add_argument('--branch', dest='reference', help='The name of a branch to import. 
Defaults to the repository\'s default branch ' '(usually master)') import_parser.add_argument('--role-name', dest='role_name', help='The name the role should have, if different than the repo name') import_parser.add_argument('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_' 'user/github_repo.') def add_setup_options(self, parser, parents=None): setup_parser = parser.add_parser('setup', parents=parents, help='Manage the integration between Galaxy and the given source.') setup_parser.set_defaults(func=self.execute_setup) setup_parser.add_argument('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ' 'ID values.') setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') setup_parser.add_argument('source', help='Source') setup_parser.add_argument('github_user', help='GitHub username') setup_parser.add_argument('github_repo', help='GitHub repository') setup_parser.add_argument('secret', help='Secret') def add_login_options(self, parser, parents=None): login_parser = parser.add_parser('login', parents=parents, help="Login to api.github.com server in order to use ansible-galaxy role sub " "command such as 'import', 'delete', 'publish', and 'setup'") login_parser.set_defaults(func=self.execute_login) login_parser.add_argument('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') def add_info_options(self, parser, parents=None): info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.') info_parser.set_defaults(func=self.execute_info) info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]') def add_install_options(self, parser, parents=None): galaxy_type = 'collection' if parser.metavar == 
'COLLECTION_ACTION' else 'role' args_kwargs = {} if galaxy_type == 'collection': args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \ 'mutually exclusive with --requirements-file.' ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \ 'collection. This will not ignore dependency conflict errors.' else: args_kwargs['help'] = 'Role name, URL or tar file' ignore_errors_help = 'Ignore errors and continue with the next specified role.' install_parser = parser.add_parser('install', parents=parents, help='Install {0}(s) from file(s), URL(s) or Ansible ' 'Galaxy'.format(galaxy_type)) install_parser.set_defaults(func=self.execute_install) install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs) install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help=ignore_errors_help) install_exclusive = install_parser.add_mutually_exclusive_group() install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help="Don't download {0}s listed as dependencies.".format(galaxy_type)) install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False, help="Force overwriting an existing {0} and its " "dependencies.".format(galaxy_type)) if galaxy_type == 'collection': install_parser.add_argument('-p', '--collections-path', dest='collections_path', required=True, help='The path to the directory containing your collections.') install_parser.add_argument('-r', '--requirements-file', dest='requirements', help='A file containing a list of collections to be installed.') else: install_parser.add_argument('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported.') install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true', default=False, help='Use tar 
instead of the scm archive option when packaging the role.') def add_build_options(self, parser, parents=None): build_parser = parser.add_parser('build', parents=parents, help='Build an Ansible collection artifact that can be publish to Ansible ' 'Galaxy.') build_parser.set_defaults(func=self.execute_build) build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',), help='Path to the collection(s) directory to build. This should be the directory ' 'that contains the galaxy.yml file. The default is the current working ' 'directory.') build_parser.add_argument('--output-path', dest='output_path', default='./', help='The path in which the collection is built to. The default is the current ' 'working directory.') def add_publish_options(self, parser, parents=None): publish_parser = parser.add_parser('publish', parents=parents, help='Publish a collection artifact to Ansible Galaxy.') publish_parser.set_defaults(func=self.execute_publish) publish_parser.add_argument('args', metavar='collection_path', help='The path to the collection tarball to publish.') publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True, help="Don't wait for import validation results.") publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0, help="The time to wait for the collection import process to finish.") def post_process_args(self, options): options = super(GalaxyCLI, self).post_process_args(options) display.verbosity = options.verbosity return options def run(self): super(GalaxyCLI, self).run() self.galaxy = Galaxy() def server_config_def(section, key, required): return { 'description': 'The %s of the %s Galaxy server' % (key, section), 'ini': [ { 'section': 'galaxy_server.%s' % section, 'key': key, } ], 'env': [ {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())}, ], 'required': required, } server_def = [('url', True), ('username', False), ('password', False), ('token', False)] 
config_servers = [] for server_key in (C.GALAXY_SERVER_LIST or []): # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the # section [galaxy_server.<server>] for the values url, username, password, and token. config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def) defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data() C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs) server_options = C.config.get_plugin_options('galaxy_server', server_key) token_val = server_options['token'] or NoTokenSentinel server_options['token'] = GalaxyToken(token=token_val) config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options)) cmd_server = context.CLIARGS['api_server'] cmd_token = GalaxyToken(token=context.CLIARGS['api_key']) if cmd_server: # Cmd args take precedence over the config entry but fist check if the arg was a name and use that config # entry, otherwise create a new API entry for the server specified. config_server = next((s for s in config_servers if s.name == cmd_server), None) if config_server: self.api_servers.append(config_server) else: self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token)) else: self.api_servers = config_servers # Default to C.GALAXY_SERVER if no servers were defined if len(self.api_servers) == 0: self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token)) context.CLIARGS['func']() @property def api(self): return self.api_servers[0] def _parse_requirements_file(self, requirements_file, allow_old_format=True): """ Parses an Ansible requirement.yml file and returns all the roles and/or collections defined in it. There are 2 requirements file format: # v1 (roles only) - src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball. 
name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL. scm: If src is a URL, specify the SCM. Only git or hd are supported and defaults ot git. version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master. include: Path to additional requirements.yml files. # v2 (roles and collections) --- roles: # Same as v1 format just under the roles key collections: - namespace.collection - name: namespace.collection version: version identifier, multiple identifiers are separated by ',' source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST :param requirements_file: The path to the requirements file. :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False. :return: a dict containing roles and collections to found in the requirements file. """ requirements = { 'roles': [], 'collections': [], } b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict') if not os.path.exists(b_requirements_file): raise AnsibleError("The requirements file '%s' does not exist." 
% to_native(requirements_file)) display.vvv("Reading requirement file at '%s'" % requirements_file) with open(b_requirements_file, 'rb') as req_obj: try: file_requirements = yaml.safe_load(req_obj) except YAMLError as err: raise AnsibleError( "Failed to parse the requirements yml at '%s' with the following error:\n%s" % (to_native(requirements_file), to_native(err))) if requirements_file is None: raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file)) def parse_role_req(requirement): if "include" not in requirement: role = RoleRequirement.role_yaml_parse(requirement) display.vvv("found role %s in yaml file" % to_text(role)) if "name" not in role and "src" not in role: raise AnsibleError("Must specify name or src for role") return [GalaxyRole(self.galaxy, **role)] else: b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict") if not os.path.isfile(b_include_path): raise AnsibleError("Failed to find include requirements file '%s' in '%s'" % (to_native(b_include_path), to_native(requirements_file))) with open(b_include_path, 'rb') as f_include: try: return [GalaxyRole(self.galaxy, **r) for r in (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))] except Exception as e: raise AnsibleError("Unable to load data from include requirements file: %s %s" % (to_native(requirements_file), to_native(e))) if isinstance(file_requirements, list): # Older format that contains only roles if not allow_old_format: raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains " "a list of collections to install") for role_req in file_requirements: requirements['roles'] += parse_role_req(role_req) else: # Newer format with a collections and/or roles key extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections'])) if extra_keys: raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements " "file. 
Found: %s" % (to_native(", ".join(extra_keys)))) for role_req in file_requirements.get('roles', []): requirements['roles'] += parse_role_req(role_req) for collection_req in file_requirements.get('collections', []): if isinstance(collection_req, dict): req_name = collection_req.get('name', None) if req_name is None: raise AnsibleError("Collections requirement entry should contain the key name.") req_version = collection_req.get('version', '*') req_source = collection_req.get('source', None) if req_source: # Try and match up the requirement source with our list of Galaxy API servers defined in the # config, otherwise create a server with that URL without any auth. req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]), GalaxyAPI(self.galaxy, "explicit_requirement_%s" % req_name, req_source)) requirements['collections'].append((req_name, req_version, req_source)) else: requirements['collections'].append((collection_req, '*', None)) return requirements @staticmethod def exit_without_ignore(rc=1): """ Exits with the specified return code unless the option --ignore-errors was specified """ if not context.CLIARGS['ignore_errors']: raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.') @staticmethod def _display_role_info(role_info): text = [u"", u"Role: %s" % to_text(role_info['name'])] text.append(u"\tdescription: %s" % role_info.get('description', '')) for k in sorted(role_info.keys()): if k in GalaxyCLI.SKIP_INFO_KEYS: continue if isinstance(role_info[k], dict): text.append(u"\t%s:" % (k)) for key in sorted(role_info[k].keys()): if key in GalaxyCLI.SKIP_INFO_KEYS: continue text.append(u"\t\t%s: %s" % (key, role_info[k][key])) else: text.append(u"\t%s: %s" % (k, role_info[k])) return u'\n'.join(text) @staticmethod def _resolve_path(path): return os.path.abspath(os.path.expanduser(os.path.expandvars(path))) @staticmethod def _get_skeleton_galaxy_yml(template_path, inject_data): with 
open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj: meta_template = to_text(template_obj.read(), errors='surrogate_or_strict') galaxy_meta = get_collections_galaxy_meta_info() required_config = [] optional_config = [] for meta_entry in galaxy_meta: config_list = required_config if meta_entry.get('required', False) else optional_config value = inject_data.get(meta_entry['key'], None) if not value: meta_type = meta_entry.get('type', 'str') if meta_type == 'str': value = '' elif meta_type == 'list': value = [] elif meta_type == 'dict': value = {} meta_entry['value'] = value config_list.append(meta_entry) link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)") const_pattern = re.compile(r"C\(([^)]+)\)") def comment_ify(v): if isinstance(v, list): v = ". ".join([l.rstrip('.') for l in v]) v = link_pattern.sub(r"\1 <\2>", v) v = const_pattern.sub(r"'\1'", v) return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False) def to_yaml(v): return yaml.safe_dump(v, default_flow_style=False).rstrip() env = Environment(loader=BaseLoader) env.filters['comment_ify'] = comment_ify env.filters['to_yaml'] = to_yaml template = env.from_string(meta_template) meta_value = template.render({'required_config': required_config, 'optional_config': optional_config}) return meta_value ############################ # execute actions ############################ def execute_role(self): """ Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init as listed below. """ # To satisfy doc build pass def execute_collection(self): """ Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as listed below. """ # To satisfy doc build pass def execute_build(self): """ Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy. 
By default, this command builds from the current working directory. You can optionally pass in the collection input path (where the ``galaxy.yml`` file is). """ force = context.CLIARGS['force'] output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path']) b_output_path = to_bytes(output_path, errors='surrogate_or_strict') if not os.path.exists(b_output_path): os.makedirs(b_output_path) elif os.path.isfile(b_output_path): raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path)) for collection_path in context.CLIARGS['args']: collection_path = GalaxyCLI._resolve_path(collection_path) build_collection(collection_path, output_path, force) def execute_init(self): """ Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format. Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``. """ galaxy_type = context.CLIARGS['type'] init_path = context.CLIARGS['init_path'] force = context.CLIARGS['force'] obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)] obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)] inject_data = dict( description='your {0} description'.format(galaxy_type), ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'), ) if galaxy_type == 'role': inject_data.update(dict( author='your name', company='your company (optional)', license='license (GPL-2.0-or-later, MIT, etc)', role_name=obj_name, role_type=context.CLIARGS['role_type'], issue_tracker_url='http://example.com/issue/tracker', repository_url='http://example.com/repository', documentation_url='http://docs.example.com', homepage_url='http://example.com', min_ansible_version=ansible_version[:3], # x.y )) obj_path = os.path.join(init_path, obj_name) elif galaxy_type == 'collection': namespace, collection_name = obj_name.split('.', 1) inject_data.update(dict( namespace=namespace, collection_name=collection_name, 
version='1.0.0', readme='README.md', authors=['your name <[email protected]>'], license=['GPL-2.0-or-later'], repository='http://example.com/repository', documentation='http://docs.example.com', homepage='http://example.com', issues='http://example.com/issue/tracker', )) obj_path = os.path.join(init_path, namespace, collection_name) b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict') if os.path.exists(b_obj_path): if os.path.isfile(obj_path): raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path)) elif not force: raise AnsibleError("- the directory %s already exists. " "You can use --force to re-initialize this directory,\n" "however it will reset any main.yml files that may have\n" "been modified there already." % to_native(obj_path)) if obj_skeleton is not None: own_skeleton = False skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE else: own_skeleton = True obj_skeleton = self.galaxy.default_role_skeleton_path skeleton_ignore_expressions = ['^.*/.git_keep$'] obj_skeleton = os.path.expanduser(obj_skeleton) skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions] if not os.path.exists(obj_skeleton): raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format( to_native(obj_skeleton), galaxy_type) ) template_env = Environment(loader=FileSystemLoader(obj_skeleton)) # create role directory if not os.path.exists(b_obj_path): os.makedirs(b_obj_path) for root, dirs, files in os.walk(obj_skeleton, topdown=True): rel_root = os.path.relpath(root, obj_skeleton) rel_dirs = rel_root.split(os.sep) rel_root_dir = rel_dirs[0] if galaxy_type == 'collection': # A collection can contain templates in playbooks/*/templates and roles/*/templates in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs else: in_templates_dir = rel_root_dir == 'templates' dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)] for f in files: 
filename, ext = os.path.splitext(f) if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re): continue elif galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2': # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options # dynamically which requires special options to be set. # The templated data's keys must match the key name but the inject data contains collection_name # instead of name. We just make a copy and change the key back to name for this file. template_data = inject_data.copy() template_data['name'] = template_data.pop('collection_name') meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data) b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict') with open(b_dest_file, 'wb') as galaxy_obj: galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict')) elif ext == ".j2" and not in_templates_dir: src_template = os.path.join(rel_root, f) dest_file = os.path.join(obj_path, rel_root, filename) template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8') else: f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton) shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path)) for d in dirs: b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict') if not os.path.exists(b_dir_path): os.makedirs(b_dir_path) display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name)) def execute_info(self): """ prints out detailed information about an installed role as well as info available from the galaxy API. 
""" roles_path = context.CLIARGS['roles_path'] data = '' for role in context.CLIARGS['args']: role_info = {'path': roles_path} gr = GalaxyRole(self.galaxy, role) install_info = gr.install_info if install_info: if 'version' in install_info: install_info['installed_version'] = install_info['version'] del install_info['version'] role_info.update(install_info) remote_data = False if not context.CLIARGS['offline']: remote_data = self.api.lookup_role_by_name(role, False) if remote_data: role_info.update(remote_data) if gr.metadata: role_info.update(gr.metadata) req = RoleRequirement() role_spec = req.role_yaml_parse({'role': role}) if role_spec: role_info.update(role_spec) data = self._display_role_info(role_info) # FIXME: This is broken in both 1.9 and 2.0 as # _display_role_info() always returns something if not data: data = u"\n- the role %s was not found" % role self.pager(data) def execute_install(self): """ Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``). You can pass in a list (roles or collections) or use the file option listed below (these are mutually exclusive). If you pass in a list, it can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file. 
""" if context.CLIARGS['type'] == 'collection': collections = context.CLIARGS['args'] force = context.CLIARGS['force'] output_path = context.CLIARGS['collections_path'] ignore_certs = context.CLIARGS['ignore_certs'] ignore_errors = context.CLIARGS['ignore_errors'] requirements_file = context.CLIARGS['requirements'] no_deps = context.CLIARGS['no_deps'] force_deps = context.CLIARGS['force_with_deps'] if collections and requirements_file: raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.") elif not collections and not requirements_file: raise AnsibleError("You must specify a collection name or a requirements file.") if requirements_file: requirements_file = GalaxyCLI._resolve_path(requirements_file) requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)['collections'] else: requirements = [] for collection_input in collections: name, dummy, requirement = collection_input.partition(':') requirements.append((name, requirement or '*', None)) output_path = GalaxyCLI._resolve_path(output_path) collections_path = C.COLLECTIONS_PATHS if len([p for p in collections_path if p.startswith(output_path)]) == 0: display.warning("The specified collections path '%s' is not part of the configured Ansible " "collections paths '%s'. The installed collection won't be picked up in an Ansible " "run." 
% (to_text(output_path), to_text(":".join(collections_path)))) if os.path.split(output_path)[1] != 'ansible_collections': output_path = os.path.join(output_path, 'ansible_collections') b_output_path = to_bytes(output_path, errors='surrogate_or_strict') if not os.path.exists(b_output_path): os.makedirs(b_output_path) install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors, no_deps, force, force_deps) return 0 role_file = context.CLIARGS['role_file'] if not context.CLIARGS['args'] and role_file is None: # the user needs to specify one of either --role-file or specify a single user/role name raise AnsibleOptionsError("- you must specify a user/role name or a roles file") no_deps = context.CLIARGS['no_deps'] force_deps = context.CLIARGS['force_with_deps'] force = context.CLIARGS['force'] or force_deps roles_left = [] if role_file: if not (role_file.endswith('.yaml') or role_file.endswith('.yml')): raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension") roles_left = self._parse_requirements_file(role_file)['roles'] else: # roles were specified directly, so we'll just go out grab them # (and their dependencies, unless the user doesn't want us to). 
for rname in context.CLIARGS['args']: role = RoleRequirement.role_yaml_parse(rname.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) for role in roles_left: # only process roles in roles files when names matches if given if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']: display.vvv('Skipping role %s' % role.name) continue display.vvv('Processing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None: if role.install_info['version'] != role.version or force: if force: display.display('- changing role %s from %s to %s' % (role.name, role.install_info['version'], role.version or "unspecified")) role.remove() else: display.warning('- %s (%s) is already installed - use --force to change version to %s' % (role.name, role.install_info['version'], role.version or "unspecified")) continue else: if not force: display.display('- %s is already installed, skipping.' % str(role)) continue try: installed = role.install() except AnsibleError as e: display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e))) self.exit_without_ignore() continue # install dependencies, if we want them if not no_deps and installed: if not role.metadata: display.warning("Meta file %s is empty. Skipping dependencies." % role.path) else: role_dependencies = role.metadata.get('dependencies') or [] for dep in role_dependencies: display.debug('Installing dep %s' % dep) dep_req = RoleRequirement() dep_info = dep_req.role_yaml_parse(dep) dep_role = GalaxyRole(self.galaxy, **dep_info) if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None: # we know we can skip this, as it's not going to # be found on galaxy.ansible.com continue if dep_role.install_info is None: if dep_role not in roles_left: display.display('- adding dependency: %s' % to_text(dep_role)) roles_left.append(dep_role) else: display.display('- dependency %s already pending installation.' 
% dep_role.name) else: if dep_role.install_info['version'] != dep_role.version: if force_deps: display.display('- changing dependant role %s from %s to %s' % (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified")) dep_role.remove() roles_left.append(dep_role) else: display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' % (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version'])) else: if force_deps: roles_left.append(dep_role) else: display.display('- dependency %s is already installed, skipping.' % dep_role.name) if not installed: display.warning("- %s was NOT installed successfully." % role.name) self.exit_without_ignore() return 0 def execute_remove(self): """ removes the list of roles passed as arguments from the local system. """ if not context.CLIARGS['args']: raise AnsibleOptionsError('- you must specify at least one role to remove.') for role_name in context.CLIARGS['args']: role = GalaxyRole(self.galaxy, role_name) try: if role.remove(): display.display('- successfully removed %s' % role_name) else: display.display('- %s is not installed, skipping.' % role_name) except Exception as e: raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e))) return 0 def execute_list(self): """ lists the roles installed on the local system or matches a single role passed as an argument. 
""" def _display_role(gr): install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" display.display("- %s, %s" % (gr.name, version)) if context.CLIARGS['role']: # show the requested role, if it exists name = context.CLIARGS['role'] gr = GalaxyRole(self.galaxy, name) if gr.metadata: display.display('# %s' % os.path.dirname(gr.path)) _display_role(gr) else: display.display("- the role %s was not found" % name) else: # show all valid roles in the roles_path directory roles_path = context.CLIARGS['roles_path'] path_found = False warnings = [] for path in roles_path: role_path = os.path.expanduser(path) if not os.path.exists(role_path): warnings.append("- the configured path %s does not exist." % role_path) continue elif not os.path.isdir(role_path): warnings.append("- the configured path %s, exists, but it is not a directory." % role_path) continue display.display('# %s' % role_path) path_files = os.listdir(role_path) path_found = True for path_file in path_files: gr = GalaxyRole(self.galaxy, path_file, path=path) if gr.metadata: _display_role(gr) for w in warnings: display.warning(w) if not path_found: raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path") return 0 def execute_publish(self): """ Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish. 
""" collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args']) wait = context.CLIARGS['wait'] timeout = context.CLIARGS['import_timeout'] publish_collection(collection_path, self.api, wait, timeout) def execute_search(self): ''' searches for roles on the Ansible Galaxy server''' page_size = 1000 search = None if context.CLIARGS['args']: search = '+'.join(context.CLIARGS['args']) if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']: raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'], tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size) if response['count'] == 0: display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = [u''] if response['count'] > page_size: data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size)) else: data.append(u"Found %d roles matching your search:" % response['count']) max_len = [] for role in response['results']: max_len.append(len(role['username'] + '.' + role['name'])) name_len = max(max_len) format_str = u" %%-%ds %%s" % name_len data.append(u'') data.append(format_str % (u"Name", u"Description")) data.append(format_str % (u"----", u"-----------")) for role in response['results']: data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])) data = u'\n'.join(data) self.pager(data) return True def execute_login(self): """ verify user's identify via Github and retrieve an auth token from Ansible Galaxy. 
""" # Authenticate with github and retrieve a token if context.CLIARGS['token'] is None: if C.GALAXY_TOKEN: github_token = C.GALAXY_TOKEN else: login = GalaxyLogin(self.galaxy) github_token = login.create_github_token() else: github_token = context.CLIARGS['token'] galaxy_response = self.api.authenticate(github_token) if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None: # Remove the token we created login.remove_github_token() # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) display.display("Successfully logged into Galaxy as %s" % galaxy_response['username']) return 0 def execute_import(self): """ used to import a role into Ansible Galaxy """ colors = { 'INFO': 'normal', 'WARNING': C.COLOR_WARN, 'ERROR': C.COLOR_ERROR, 'SUCCESS': C.COLOR_OK, 'FAILED': C.COLOR_ERROR, } github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict') github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict') if context.CLIARGS['check_status']: task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=context.CLIARGS['reference'], role_name=context.CLIARGS['role_name']) if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." 
% (github_user, github_repo), color='yellow') display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) if not context.CLIARGS['wait']: display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo'])) if context.CLIARGS['check_status'] or context.CLIARGS['wait']: # Get the status of the import msg_list = [] finished = False while not finished: task = self.api.get_import_task(task_id=task[0]['id']) for msg in task[0]['summary_fields']['task_messages']: if msg['id'] not in msg_list: display.display(msg['message_text'], color=colors[msg['message_type']]) msg_list.append(msg['id']) if task[0]['state'] in ['SUCCESS', 'FAILED']: finished = True else: time.sleep(10) return 0 def execute_setup(self): """ Setup an integration from Github or Travis for Ansible Galaxy roles""" if context.CLIARGS['setup_list']: # List existing integration secrets secrets = self.api.list_secrets() if len(secrets) == 0: # None found display.display("No integrations found.") return 0 display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], secret['github_repo']), color=C.COLOR_OK) return 0 if context.CLIARGS['remove_id']: # Remove a secret self.api.remove_secret(context.CLIARGS['remove_id']) display.display("Secret removed. 
Integrations using this secret will not longer work.", color=C.COLOR_OK) return 0 source = context.CLIARGS['source'] github_user = context.CLIARGS['github_user'] github_repo = context.CLIARGS['github_repo'] secret = context.CLIARGS['secret'] resp = self.api.add_secret(source, github_user, github_repo, secret) display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) return 0 def execute_delete(self): """ Delete a role from Ansible Galaxy. """ github_user = context.CLIARGS['github_user'] github_repo = context.CLIARGS['github_repo'] resp = self.api.delete_role(github_user, github_repo) if len(resp['deleted_roles']) > 1: display.display("Deleted the following roles:") display.display("ID User Name") display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name)) display.display(resp['status']) return True
closed
ansible/ansible
https://github.com/ansible/ansible
61,609
ansible-galaxy 2.9 role install ignores --server option
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Role install in the current `stable-2.9` branch attempts to install from `http://galaxy.ansible.com/` ignoring anything passed to `--server`. Other role commands, like `import`, seem to respect the server parameter properly. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Fedora 29 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Expect the given role to be installed from the specified Galaxy server, rather than the default production instance. Especially for automated tests which we do not want to run against production. 
##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below (orion) [calvin@localhost ansible]$ ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash [DEPRECATION WARNING]: Setting verbosity before the arg sub command is deprecated, set the verbosity after the sub command. This feature will be removed in version 2.13. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ansible-galaxy 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible-galaxy python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] Using /etc/ansible/ansible.cfg as config file Processing role orionuser1.ansible_role_logstash - downloading role 'ansible_role_logstash', owned by orionuser1 [WARNING]: - orionuser1.ansible_role_logstash was NOT installed successfully: - sorry, orionuser1.ansible_role_logstash was not found on https://galaxy.ansible.com. ERROR! - you can use --ignore-errors to skip failed roles and finish processing the list. ```
https://github.com/ansible/ansible/issues/61609
https://github.com/ansible/ansible/pull/61820
f81b7dd10a674c72659b25be20461865bb997eed
3a7b77a94ce534c502828ebd2959f6fdf9d183f5
2019-08-30T16:03:22Z
python
2019-09-05T01:46:44Z
lib/ansible/galaxy/__init__.py
######################################################################## # # (C) 2015, Brian Coca <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## ''' This manages remote shared Ansible objects, mainly roles''' from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import yaml import ansible.constants as C from ansible import context from ansible.module_utils._text import to_bytes # default_readme_template # default_meta_template def get_collections_galaxy_meta_info(): meta_path = os.path.join(os.path.dirname(__file__), 'data', 'collections_galaxy_meta.yml') with open(to_bytes(meta_path, errors='surrogate_or_strict'), 'rb') as galaxy_obj: return yaml.safe_load(galaxy_obj) class Galaxy(object): ''' Keeps global galaxy info ''' def __init__(self): # roles_path needs to be a list and will be by default roles_path = context.CLIARGS.get('roles_path', C.DEFAULT_ROLES_PATH) # cli option handling is responsible for splitting roles_path self.roles_paths = roles_path self.roles = {} # load data path for resource usage this_dir, this_filename = os.path.split(__file__) type_path = context.CLIARGS.get('role_type', 'default') if type_path == 'default': type_path = os.path.join(type_path, context.CLIARGS.get('type')) self.DATA_PATH = 
os.path.join(this_dir, 'data', type_path) @property def default_role_skeleton_path(self): return self.DATA_PATH def add_role(self, role): self.roles[role.name] = role def remove_role(self, role_name): del self.roles[role_name]
closed
ansible/ansible
https://github.com/ansible/ansible
61,609
ansible-galaxy 2.9 role install ignores --server option
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Role install in the current `stable-2.9` branch attempts to install from `http://galaxy.ansible.com/` ignoring anything passed to `--server`. Other role commands, like `import`, seem to respect the server parameter properly. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Fedora 29 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Expect the given role to be installed from the specified Galaxy server, rather than the default production instance. Especially for automated tests which we do not want to run against production. 
##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below (orion) [calvin@localhost ansible]$ ansible-galaxy -vvv install --server=http://galaxy-dev.ansible.com orionuser1.ansible_role_logstash [DEPRECATION WARNING]: Setting verbosity before the arg sub command is deprecated, set the verbosity after the sub command. This feature will be removed in version 2.13. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ansible-galaxy 2.9.0b1 config file = /etc/ansible/ansible.cfg configured module search path = ['/home/calvin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/calvin/projects/ansible/lib/ansible executable location = /home/calvin/.local/share/virtualenvs/orion/bin/ansible-galaxy python version = 3.6.9 (default, Jul 3 2019, 17:57:57) [GCC 8.3.1 20190223 (Red Hat 8.3.1-2)] Using /etc/ansible/ansible.cfg as config file Processing role orionuser1.ansible_role_logstash - downloading role 'ansible_role_logstash', owned by orionuser1 [WARNING]: - orionuser1.ansible_role_logstash was NOT installed successfully: - sorry, orionuser1.ansible_role_logstash was not found on https://galaxy.ansible.com. ERROR! - you can use --ignore-errors to skip failed roles and finish processing the list. ```
https://github.com/ansible/ansible/issues/61609
https://github.com/ansible/ansible/pull/61820
f81b7dd10a674c72659b25be20461865bb997eed
3a7b77a94ce534c502828ebd2959f6fdf9d183f5
2019-08-30T16:03:22Z
python
2019-09-05T01:46:44Z
lib/ansible/galaxy/role.py
########################################################################
#
# (C) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import errno
import datetime
import os
import tarfile
import tempfile
import yaml

from distutils.version import LooseVersion
from shutil import rmtree

import ansible.constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
from ansible.utils.display import Display

display = Display()


class GalaxyRole(object):
    """
    A single Ansible Galaxy role: where it lives on disk, which source
    (Galaxy API, URL, tarball or SCM) it comes from, and how to fetch,
    install and remove it.
    """

    SUPPORTED_SCMS = set(['git', 'hg'])
    # Candidate metadata file names, checked in order.
    META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
    # Install-info file written after a successful install (version, date).
    META_INSTALL = os.path.join('meta', '.galaxy_install_info')
    ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')

    def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
        """
        :param galaxy: the Galaxy context object (supplies roles_paths)
        :param name: role name
        :param src: role source (defaults to the name when not given)
        :param version: requested version, or None for "latest"
        :param scm: 'git'/'hg' when the role comes from an SCM URL
        :param path: explicit install path; when None the first configured
                     roles path is used
        """
        self._metadata = None
        self._install_info = None
        self._validate_certs = not context.CLIARGS['ignore_certs']

        display.debug('Validate TLS certificates: %s' % self._validate_certs)

        self.galaxy = galaxy
        self.name = name
        self.version = version
        self.src = src or name
        self.scm = scm

        if path is not None:
            if self.name not in path:
                # ensure the role name is the final path component
                path = os.path.join(path, self.name)
            self.path = path
        else:
            # use the first path by default
            self.path = os.path.join(galaxy.roles_paths[0], self.name)
        # every configured roles path is a candidate location for this role;
        # install() falls back through these on EACCES
        self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]

    def __repr__(self):
        """
        Returns "rolename (version)" if version is not null
        Returns "rolename" otherwise
        """
        if self.version:
            return "%s (%s)" % (self.name, self.version)
        return self.name

    def __eq__(self, other):
        # roles are identified by name only; version is not compared
        return self.name == other.name

    @property
    def metadata(self):
        """
        Lazily load and cache the role's meta/main.yml (or .yaml).

        Returns the parsed metadata dict, None when no metadata file
        exists, or False when a metadata file exists but cannot be read.
        """
        if self._metadata is None:
            for meta_main in self.META_MAIN:
                meta_path = os.path.join(self.path, meta_main)
                if os.path.isfile(meta_path):
                    try:
                        # FIX: use a with-block. The old open()/finally
                        # pattern raised NameError from `f.close()` when
                        # open() itself failed, masking the real error.
                        with open(meta_path, 'r') as f:
                            self._metadata = yaml.safe_load(f)
                    except Exception:
                        display.vvvvv("Unable to load metadata for %s" % self.name)
                        return False
        return self._metadata

    @property
    def install_info(self):
        """
        Lazily load and cache the .galaxy_install_info file written at
        install time (contains version and install date).

        Returns the parsed dict, None when the file does not exist, or
        False when it exists but cannot be read.
        """
        if self._install_info is None:
            info_path = os.path.join(self.path, self.META_INSTALL)
            if os.path.isfile(info_path):
                try:
                    # FIX: with-block for the same reason as `metadata` above.
                    with open(info_path, 'r') as f:
                        self._install_info = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                    return False
        return self._install_info
""" info = dict( version=self.version, install_date=datetime.datetime.utcnow().strftime("%c"), ) if not os.path.exists(os.path.join(self.path, 'meta')): os.makedirs(os.path.join(self.path, 'meta')) info_path = os.path.join(self.path, self.META_INSTALL) with open(info_path, 'w+') as f: try: self._install_info = yaml.safe_dump(info, f) except Exception: return False return True def remove(self): """ Removes the specified role from the roles path. There is a sanity check to make sure there's a meta/main.yml file at this path so the user doesn't blow away random directories. """ if self.metadata: try: rmtree(self.path) return True except Exception: pass return False def fetch(self, role_data): """ Downloads the archived role to a temp location based on role data """ if role_data: # first grab the file and save it to a temp location if "github_user" in role_data and "github_repo" in role_data: archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version) else: archive_url = self.src display.display("- downloading role from %s" % archive_url) try: url_file = open_url(archive_url, validate_certs=self._validate_certs) temp_file = tempfile.NamedTemporaryFile(delete=False) data = url_file.read() while data: temp_file.write(data) data = url_file.read() temp_file.close() return temp_file.name except Exception as e: display.error(u"failed to download the file: %s" % to_text(e)) return False def install(self): if self.scm: # create tar file from scm url tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec) elif self.src: if os.path.isfile(self.src): tmp_file = self.src elif '://' in self.src: role_data = self.src tmp_file = self.fetch(role_data) else: api = GalaxyAPI(self.galaxy, 'role_default', C.GALAXY_SERVER) role_data = api.lookup_role_by_name(self.src) if not role_data: raise AnsibleError("- sorry, %s was not found on %s." 
% (self.src, api.api_server)) if role_data.get('role_type') == 'APP': # Container Role display.warning("%s is a Container App role, and should only be installed using Ansible " "Container" % self.name) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: # convert the version names to LooseVersion objects # and sort them to get the latest version. If there # are no versions in the list, we'll grab the head # of the master branch if len(role_versions) > 0: loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions] try: loose_versions.sort() except TypeError: raise AnsibleError( 'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. ' 'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to ' 'install.' % ', '.join([v.vstring for v in loose_versions]) ) self.version = to_text(loose_versions[-1]) elif role_data.get('github_branch', None): self.version = role_data['github_branch'] else: self.version = 'master' elif self.version != 'master': if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." 
% (self.version, self.name, role_versions)) # check if there's a source link for our role_version for role_version in role_versions: if role_version['name'] == self.version and 'source' in role_version: self.src = role_version['source'] tmp_file = self.fetch(role_data) else: raise AnsibleError("No valid role data found") if tmp_file: display.debug("installing from %s" % tmp_file) if not tarfile.is_tarfile(tmp_file): raise AnsibleError("the downloaded file does not appear to be a valid tar archive.") else: role_tar_file = tarfile.open(tmp_file, "r") # verify the role's meta file meta_file = None members = role_tar_file.getmembers() # next find the metadata file for member in members: for meta_main in self.META_MAIN: if meta_main in member.name: # Look for parent of meta/main.yml # Due to possibility of sub roles each containing meta/main.yml # look for shortest length parent meta_parent_dir = os.path.dirname(os.path.dirname(member.name)) if not meta_file: archive_parent_dir = meta_parent_dir meta_file = member else: if len(meta_parent_dir) < len(archive_parent_dir): archive_parent_dir = meta_parent_dir meta_file = member if not meta_file: raise AnsibleError("this role does not appear to have a meta/main.yml file.") else: try: self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file)) except Exception: raise AnsibleError("this role does not appear to have a valid meta/main.yml file.") # we strip off any higher-level directories for all of the files contained within # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other # hand, does not have a parent directory at all. 
installed = False while not installed: display.display("- extracting %s to %s" % (self.name, self.path)) try: if os.path.exists(self.path): if not os.path.isdir(self.path): raise AnsibleError("the specified roles path exists and is not a directory.") elif not context.CLIARGS.get("force", False): raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name) else: # using --force, remove the old path if not self.remove(): raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really " "want to put the role here." % self.path) else: os.makedirs(self.path) # now we do the actual extraction to the path for member in members: # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop any containing directory, as mentioned above if member.isreg() or member.issym(): parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep) final_parts = [] for part in parts: if part != '..' 
and '~' not in part and '$' not in part: final_parts.append(part) member.name = os.path.join(*final_parts) role_tar_file.extract(member, self.path) # write out the install info file for later use self._write_galaxy_install_info() installed = True except OSError as e: error = True if e.errno == errno.EACCES and len(self.paths) > 1: current = self.paths.index(self.path) if len(self.paths) > current: self.path = self.paths[current + 1] error = False if error: raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e))) # return the parsed yaml metadata display.display("- %s was installed successfully" % str(self)) if not (self.src and os.path.isfile(self.src)): try: os.unlink(tmp_file) except (OSError, IOError) as e: display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e))) return True return False @property def spec(self): """ Returns role spec info { 'scm': 'git', 'src': 'http://git.example.com/repos/repo.git', 'version': 'v1.0', 'name': 'repo' } """ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
closed
ansible/ansible
https://github.com/ansible/ansible
59,804
Redfish: Power commands not mapping to allowable ResetType
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> For the Power* commands (redfish_utils.py:manage_system_power(), there is some logic to check the allowable ResetType values and try to map the requested command to one that is supported by the system. For example, map `GracefulRestart` to `ForceRestart` if `ForceRestart` is allowed but `GracefulRestart` is not. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> redfish_command.py redfish_utils.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0.dev0 config file = $HOME/.ansible.cfg configured module search path = ['$HOME/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = $HOME/Development/git/ansible/lib/ansible executable location = $HOME/Development/git/ansible/bin/ansible python version = 3.6.5 (default, Apr 25 2018, 14:26:36) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below [no output] ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Applies to Redfish OOB controllers. ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Issue `PowerGracefulRestart` command for a system where `ForceRestart` is allowed but `GracefulRestart` is not. 
<!--- Paste example playbooks or commands between quotes below --> ```yaml --- - hosts: myhosts connection: local name: Manage System Power - Greaceful restart gather_facts: False tasks: - name: Restart system power gracefully redfish_command: category: Systems command: PowerGracefulRestart baseuri: "{{ baseuri }}" username: "{{ username }}" password: "{{ password }}" ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Command will successfully issue a computer system reset with `ResetType` of `ForceRestart`. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Command failed with: <!--- Paste verbatim command output between quotes --> ```paste below Unable to complete the operation because the value GracefulRestart entered for the property ResetType is not in the list of acceptable values ```
https://github.com/ansible/ansible/issues/59804
https://github.com/ansible/ansible/pull/59927
823c108facd19de183671ab17e2b189ebea5fa81
2a932ad7cfe27663d47cbd43de124fc756632361
2019-07-30T17:59:14Z
python
2019-09-05T09:39:21Z
changelogs/fragments/59927-fix-redfish-power-reset-type-mapping.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
59,804
Redfish: Power commands not mapping to allowable ResetType
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> For the Power* commands (redfish_utils.py:manage_system_power(), there is some logic to check the allowable ResetType values and try to map the requested command to one that is supported by the system. For example, map `GracefulRestart` to `ForceRestart` if `ForceRestart` is allowed but `GracefulRestart` is not. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> redfish_command.py redfish_utils.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.9.0.dev0 config file = $HOME/.ansible.cfg configured module search path = ['$HOME/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = $HOME/Development/git/ansible/lib/ansible executable location = $HOME/Development/git/ansible/bin/ansible python version = 3.6.5 (default, Apr 25 2018, 14:26:36) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below [no output] ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Applies to Redfish OOB controllers. ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Issue `PowerGracefulRestart` command for a system where `ForceRestart` is allowed but `GracefulRestart` is not. 
<!--- Paste example playbooks or commands between quotes below --> ```yaml --- - hosts: myhosts connection: local name: Manage System Power - Greaceful restart gather_facts: False tasks: - name: Restart system power gracefully redfish_command: category: Systems command: PowerGracefulRestart baseuri: "{{ baseuri }}" username: "{{ username }}" password: "{{ password }}" ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Command will successfully issue a computer system reset with `ResetType` of `ForceRestart`. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Command failed with: <!--- Paste verbatim command output between quotes --> ```paste below Unable to complete the operation because the value GracefulRestart entered for the property ResetType is not in the list of acceptable values ```
https://github.com/ansible/ansible/issues/59804
https://github.com/ansible/ansible/pull/59927
823c108facd19de183671ab17e2b189ebea5fa81
2a932ad7cfe27663d47cbd43de124fc756632361
2019-07-30T17:59:14Z
python
2019-09-05T09:39:21Z
lib/ansible/module_utils/redfish_utils.py
# Copyright (c) 2017-2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json

from ansible.module_utils.urls import open_url
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import http_client
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError

# Default Redfish headers per HTTP verb (OData-Version 4.0 is mandated by the spec).
GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'}
PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'}
DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}


class RedfishUtils(object):
    """Thin HTTP wrapper for talking to a Redfish service.

    All request helpers return a dict with at least a 'ret' boolean; on
    failure they add a 'msg' (and 'status' for HTTP errors) instead of
    raising, so module code can surface errors through Ansible.
    """

    def __init__(self, creds, root_uri, timeout, module):
        # creds: dict with 'user' and 'pswd' used for basic auth on every call
        self.root_uri = root_uri
        self.creds = creds
        self.timeout = timeout
        self.module = module
        self.service_root = '/redfish/v1/'
        self._init_session()

    # The following functions are to send GET/POST/PATCH/DELETE requests
    def get_request(self, uri):
        """GET `uri`; return {'ret', 'data', 'headers'} or an error dict.

        Response headers are lower-cased so callers can look them up
        case-insensitively (HTTP header names are case-insensitive).
        """
        try:
            resp = open_url(uri, method="GET", headers=GET_HEADERS,
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=False, timeout=self.timeout)
            data = json.loads(resp.read())
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            msg = self._get_extended_message(e)
            return {'ret': False,
                    'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'"
                           % (e.code, uri, msg),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'data': data, 'headers': headers}

    def post_request(self, uri, pyld):
        """POST JSON-encoded `pyld` to `uri`; return {'ret', 'resp'} or an error dict."""
        try:
            resp = open_url(uri, data=json.dumps(pyld),
                            headers=POST_HEADERS, method="POST",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=False, timeout=self.timeout)
        except HTTPError as e:
            msg = self._get_extended_message(e)
            return {'ret': False,
                    'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'"
                           % (e.code, uri, msg),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'resp': resp}

    def patch_request(self, uri, pyld):
        """PATCH JSON-encoded `pyld` to `uri`; return {'ret', 'resp'} or an error dict.

        Fetches the resource first to obtain its ETag and sends it as
        If-Match, so concurrent modifications are rejected by the service
        rather than silently overwritten.
        """
        headers = PATCH_HEADERS
        r = self.get_request(uri)
        if r['ret']:
            # Get etag from etag header or @odata.etag property
            etag = r['headers'].get('etag')
            if not etag:
                etag = r['data'].get('@odata.etag')
            if etag:
                # Make copy of headers and add If-Match header
                # (copy so the module-level PATCH_HEADERS stays pristine)
                headers = dict(headers)
                headers['If-Match'] = etag
        try:
            resp = open_url(uri, data=json.dumps(pyld),
                            headers=headers, method="PATCH",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=False, timeout=self.timeout)
        except HTTPError as e:
            msg = self._get_extended_message(e)
            return {'ret': False,
                    'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'"
                           % (e.code, uri, msg),
                    'status': e.code}
        except URLError as e:
            return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'"
                                         % (uri, e.reason)}
        # Almost all errors should be caught above, but just in case
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'resp': resp}

    def delete_request(self, uri, pyld=None):
        # (body continues in the next span of this file)
        try:
            data = json.dumps(pyld) if pyld else None
resp = open_url(uri, data=data, headers=DELETE_HEADERS, method="DELETE", url_username=self.creds['user'], url_password=self.creds['pswd'], force_basic_auth=True, validate_certs=False, follow_redirects='all', use_proxy=False, timeout=self.timeout) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" % (e.code, uri, msg), 'status': e.code} except URLError as e: return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" % (uri, e.reason)} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} @staticmethod def _get_extended_message(error): """ Get Redfish ExtendedInfo message from response payload if present :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error """ msg = http_client.responses.get(error.code, '') if error.code >= 400: try: body = error.read().decode('utf-8') data = json.loads(body) ext_info = data['error']['@Message.ExtendedInfo'] msg = ext_info[0]['Message'] except Exception: pass return msg def _init_session(self): pass def _find_accountservice_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'AccountService' not in data: return {'ret': False, 'msg': "AccountService resource not found"} else: account_service = data["AccountService"]["@odata.id"] response = self.get_request(self.root_uri + account_service) if response['ret'] is False: return response data = response['data'] accounts = data['Accounts']['@odata.id'] if accounts[-1:] == '/': accounts = accounts[:-1] self.accounts_uri = accounts return {'ret': True} def _find_sessionservice_resource(self): response = self.get_request(self.root_uri + self.service_root) 
if response['ret'] is False: return response data = response['data'] if 'SessionService' not in data: return {'ret': False, 'msg': "SessionService resource not found"} else: session_service = data["SessionService"]["@odata.id"] response = self.get_request(self.root_uri + session_service) if response['ret'] is False: return response data = response['data'] sessions = data['Sessions']['@odata.id'] if sessions[-1:] == '/': sessions = sessions[:-1] self.sessions_uri = sessions return {'ret': True} def _find_systems_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'Systems' not in data: return {'ret': False, 'msg': "Systems resource not found"} response = self.get_request(self.root_uri + data['Systems']['@odata.id']) if response['ret'] is False: return response self.systems_uris = [ i['@odata.id'] for i in response['data'].get('Members', [])] if not self.systems_uris: return { 'ret': False, 'msg': "ComputerSystem's Members array is either empty or missing"} return {'ret': True} def _find_updateservice_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'UpdateService' not in data: return {'ret': False, 'msg': "UpdateService resource not found"} else: update = data["UpdateService"]["@odata.id"] self.update_uri = update response = self.get_request(self.root_uri + update) if response['ret'] is False: return response data = response['data'] firmware_inventory = data['FirmwareInventory'][u'@odata.id'] self.firmware_uri = firmware_inventory return {'ret': True} def _find_chassis_resource(self): chassis_service = [] response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'Chassis' not in data: return {'ret': False, 'msg': "Chassis resource not found"} else: chassis = data["Chassis"]["@odata.id"] 
response = self.get_request(self.root_uri + chassis) if response['ret'] is False: return response data = response['data'] for member in data[u'Members']: chassis_service.append(member[u'@odata.id']) self.chassis_uri_list = chassis_service return {'ret': True} def _find_managers_resource(self): response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] if 'Managers' not in data: return {'ret': False, 'msg': "Manager resource not found"} else: manager = data["Managers"]["@odata.id"] response = self.get_request(self.root_uri + manager) if response['ret'] is False: return response data = response['data'] for member in data[u'Members']: manager_service = member[u'@odata.id'] self.manager_uri = manager_service return {'ret': True} def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat', 'Message', 'MessageId', 'MessageArgs'] # Find LogService response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] if 'LogServices' not in data: return {'ret': False, 'msg': "LogServices resource not found"} # Find all entries in LogServices logs_uri = data["LogServices"]["@odata.id"] response = self.get_request(self.root_uri + logs_uri) if response['ret'] is False: return response data = response['data'] for log_svcs_entry in data.get('Members', []): response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id']) if response['ret'] is False: return response _data = response['data'] if 'Entries' in _data: log_svcs_uri_list.append(_data['Entries'][u'@odata.id']) # For each entry in LogServices, get log name and all log entries for log_svcs_uri in log_svcs_uri_list: logs = {} list_of_log_entries = [] response = self.get_request(self.root_uri + log_svcs_uri) if response['ret'] is False: return response data = response['data'] logs['Description'] = 
data.get('Description', 'Collection of log entries') # Get all log entries for each type of log found for logEntry in data.get('Members', []): entry = {} for prop in properties: if prop in logEntry: entry[prop] = logEntry.get(prop) if entry: list_of_log_entries.append(entry) log_name = log_svcs_uri.split('/')[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) # list_of_logs[logs{list_of_log_entries[entry{}]}] return {'ret': True, 'entries': list_of_logs} def clear_logs(self): # Find LogService response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] if 'LogServices' not in data: return {'ret': False, 'msg': "LogServices resource not found"} # Find all entries in LogServices logs_uri = data["LogServices"]["@odata.id"] response = self.get_request(self.root_uri + logs_uri) if response['ret'] is False: return response data = response['data'] for log_svcs_entry in data[u'Members']: response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) if response['ret'] is False: return response _data = response['data'] # Check to make sure option is available, otherwise error is ugly if "Actions" in _data: if "#LogService.ClearLog" in _data[u"Actions"]: self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {}) if response['ret'] is False: return response return {'ret': True} def aggregate(self, func): ret = True entries = [] for systems_uri in self.systems_uris: inventory = func(systems_uri) ret = inventory.pop('ret') and ret if 'entries' in inventory: entries.append(({'systems_uri': systems_uri}, inventory['entries'])) return dict(ret=ret, entries=entries) def get_storage_controller_inventory(self, systems_uri): result = {} controller_list = [] controller_results = [] # Get these entries, but does not fail if not found properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', 'Location', 'Manufacturer', 'Model', 'Name', 'PartNumber', 
'SerialNumber', 'SpeedGbps', 'Status'] key = "StorageControllers" # Find Storage service response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response data = response['data'] if 'Storage' not in data: return {'ret': False, 'msg': "Storage resource not found"} # Get a list of all storage controllers and build respective URIs storage_uri = data['Storage']["@odata.id"] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] # Loop through Members and their StorageControllers # and gather properties from each StorageController if data[u'Members']: for storage_member in data[u'Members']: storage_member_uri = storage_member[u'@odata.id'] response = self.get_request(self.root_uri + storage_member_uri) data = response['data'] if key in data: controller_list = data[key] for controller in controller_list: controller_result = {} for property in properties: if property in controller: controller_result[property] = controller[property] controller_results.append(controller_result) result['entries'] = controller_results return result else: return {'ret': False, 'msg': "Storage resource not found"} def get_multi_storage_controller_inventory(self): return self.aggregate(self.get_storage_controller_inventory) def get_disk_inventory(self, systems_uri): result = {'entries': []} controller_list = [] disk_results = [] # Get these entries, but does not fail if not found properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', 'EncryptionAbility', 'EncryptionStatus', 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', 'Manufacturer', 'MediaType', 'Model', 'Name', 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', 'RotationSpeedRPM', 'SerialNumber', 'Status'] # Find Storage service response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response data = response['data'] if 'SimpleStorage' not in data and 
'Storage' not in data: return {'ret': False, 'msg': "SimpleStorage and Storage resource \ not found"} if 'Storage' in data: # Get a list of all storage controllers and build respective URIs storage_uri = data[u'Storage'][u'@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if data[u'Members']: for controller in data[u'Members']: controller_list.append(controller[u'@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] if 'Drives' in data: for device in data[u'Drives']: disk_uri = self.root_uri + device[u'@odata.id'] response = self.get_request(disk_uri) data = response['data'] disk_result = {} for property in properties: if property in data: if data[property] is not None: disk_result[property] = data[property] disk_results.append(disk_result) result["entries"].append(disk_results) if 'SimpleStorage' in data: # Get a list of all storage controllers and build respective URIs storage_uri = data["SimpleStorage"]["@odata.id"] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for controller in data[u'Members']: controller_list.append(controller[u'@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] for device in data[u'Devices']: disk_result = {} for property in properties: if property in device: disk_result[property] = device[property] disk_results.append(disk_result) result["entries"].append(disk_results) return result def get_multi_disk_inventory(self): return self.aggregate(self.get_disk_inventory) def get_volume_inventory(self, systems_uri): result = {'entries': []} controller_list = [] volume_list = [] volume_results = [] # Get these entries, but 
does not fail if not found properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes', 'Capacity', 'CapacityBytes', 'CapacitySources', 'Encrypted', 'EncryptionTypes', 'Identifiers', 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities', 'AllocatedPools', 'Status'] # Find Storage service response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response data = response['data'] if 'SimpleStorage' not in data and 'Storage' not in data: return {'ret': False, 'msg': "SimpleStorage and Storage resource \ not found"} if 'Storage' in data: # Get a list of all storage controllers and build respective URIs storage_uri = data[u'Storage'][u'@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if data.get('Members'): for controller in data[u'Members']: controller_list.append(controller[u'@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] if 'Volumes' in data: # Get a list of all volumes and build respective URIs volumes_uri = data[u'Volumes'][u'@odata.id'] response = self.get_request(self.root_uri + volumes_uri) data = response['data'] if data.get('Members'): for volume in data[u'Members']: volume_list.append(volume[u'@odata.id']) for v in volume_list: uri = self.root_uri + v response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] volume_result = {} for property in properties: if property in data: if data[property] is not None: volume_result[property] = data[property] # Get related Drives Id drive_id_list = [] if 'Links' in data: if 'Drives' in data[u'Links']: for link in data[u'Links'][u'Drives']: drive_id_link = link[u'@odata.id'] drive_id = drive_id_link.split("/")[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list 
volume_results.append(volume_result) result["entries"].append(volume_results) else: return {'ret': False, 'msg': "Storage resource not found"} return result def get_multi_volume_inventory(self): return self.aggregate(self.get_volume_inventory) def restart_manager_gracefully(self): result = {} key = "Actions" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] action_uri = data[key]["#Manager.Reset"]["target"] payload = {'ResetType': 'GracefulRestart'} response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response return {'ret': True} def manage_indicator_led(self, command): result = {} key = 'IndicatorLED' payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} result = {} for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} if command in payloads.keys(): payload = {'IndicatorLED': payloads[command]} response = self.patch_request(self.root_uri + chassis_uri, payload) if response['ret'] is False: return response else: return {'ret': False, 'msg': 'Invalid command'} return result def manage_system_power(self, command): key = "Actions" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response data = response['data'] power_state = data["PowerState"] if power_state == "On" and command == 'PowerOn': return {'ret': True, 'changed': False} if power_state == "Off" and command in ['PowerGracefulShutdown', 'PowerForceOff']: return {'ret': True, 'changed': False} reset_action = data[key]["#ComputerSystem.Reset"] action_uri = 
reset_action["target"] allowable_vals = reset_action.get("[email protected]", []) restart_cmd = "GracefulRestart" if "ForceRestart" in allowable_vals and "GracefulRestart" not in allowable_vals: restart_cmd = "ForceRestart" # Define payload accordingly if command == "PowerOn": payload = {'ResetType': 'On'} elif command == "PowerForceOff": payload = {'ResetType': 'ForceOff'} elif command == "PowerForceRestart": payload = {'ResetType': "ForceRestart"} elif command == "PowerGracefulRestart": payload = {'ResetType': 'GracefulRestart'} elif command == "PowerGracefulShutdown": payload = {'ResetType': 'GracefulShutdown'} elif command == "PowerReboot": if power_state == "On": payload = {'ResetType': restart_cmd} else: payload = {'ResetType': "On"} else: return {'ret': False, 'msg': 'Invalid Command'} response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True} def _find_account_uri(self, username=None, acct_id=None): if not any((username, acct_id)): return {'ret': False, 'msg': 'Must provide either account_id or account_username'} response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: return response data = response['data'] uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')] for uri in uris: response = self.get_request(self.root_uri + uri) if response['ret'] is False: continue data = response['data'] headers = response['headers'] if username: if username == data.get('UserName'): return {'ret': True, 'data': data, 'headers': headers, 'uri': uri} if acct_id: if acct_id == data.get('Id'): return {'ret': True, 'data': data, 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No account with the given account_id or account_username found'} def _find_empty_account_slot(self): response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: return response data = 
response['data'] uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')] if uris: # first slot may be reserved, so move to end of list uris += [uris.pop(0)] for uri in uris: response = self.get_request(self.root_uri + uri) if response['ret'] is False: continue data = response['data'] headers = response['headers'] if data.get('UserName') == "" and not data.get('Enabled', True): return {'ret': True, 'data': data, 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No empty account slot found'} def list_users(self): result = {} # listing all users has always been slower than other operations, why? user_list = [] users_results = [] # Get these entries, but does not fail if not found properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for users in data.get('Members', []): user_list.append(users[u'@odata.id']) # user_list[] are URIs # for each user, get details for uri in user_list: user = {} response = self.get_request(self.root_uri + uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: user[property] = data[property] users_results.append(user) result["entries"] = users_results return result def add_user_via_patch(self, user): if user.get('account_id'): # If Id slot specified, use it response = self._find_account_uri(acct_id=user.get('account_id')) else: # Otherwise find first empty slot response = self._find_empty_account_slot() if not response['ret']: return response uri = response['uri'] payload = {} if user.get('account_username'): payload['UserName'] = user.get('account_username') if user.get('account_password'): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') response = 
self.patch_request(self.root_uri + uri, payload) if response['ret'] is False: return response return {'ret': True} def add_user(self, user): if not user.get('account_username'): return {'ret': False, 'msg': 'Must provide account_username for AddUser command'} response = self._find_account_uri(username=user.get('account_username')) if response['ret']: # account_username already exists, nothing to do return {'ret': True, 'changed': False} response = self.get_request(self.root_uri + self.accounts_uri) if not response['ret']: return response headers = response['headers'] if 'allow' in headers: methods = [m.strip() for m in headers.get('allow').split(',')] if 'POST' not in methods: # if Allow header present and POST not listed, add via PATCH return self.add_user_via_patch(user) payload = {} if user.get('account_username'): payload['UserName'] = user.get('account_username') if user.get('account_password'): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') response = self.post_request(self.root_uri + self.accounts_uri, payload) if not response['ret']: if response.get('status') == 405: # if POST returned a 405, try to add via PATCH return self.add_user_via_patch(user) else: return response return {'ret': True} def enable_user(self, user): response = self._find_account_uri(username=user.get('account_username'), acct_id=user.get('account_id')) if not response['ret']: return response uri = response['uri'] data = response['data'] if data.get('Enabled'): # account already enabled, nothing to do return {'ret': True, 'changed': False} payload = {'Enabled': True} response = self.patch_request(self.root_uri + uri, payload) if response['ret'] is False: return response return {'ret': True} def delete_user_via_patch(self, user, uri=None, data=None): if not uri: response = self._find_account_uri(username=user.get('account_username'), acct_id=user.get('account_id')) if not response['ret']: return response uri 
= response['uri'] data = response['data'] if data and data.get('UserName') == '' and not data.get('Enabled', False): # account UserName already cleared, nothing to do return {'ret': True, 'changed': False} payload = {'UserName': ''} if 'Enabled' in data: payload['Enabled'] = False response = self.patch_request(self.root_uri + uri, payload) if response['ret'] is False: return response return {'ret': True} def delete_user(self, user): response = self._find_account_uri(username=user.get('account_username'), acct_id=user.get('account_id')) if not response['ret']: if response.get('no_match'): # account does not exist, nothing to do return {'ret': True, 'changed': False} else: # some error encountered return response uri = response['uri'] headers = response['headers'] data = response['data'] if 'allow' in headers: methods = [m.strip() for m in headers.get('allow').split(',')] if 'DELETE' not in methods: # if Allow header present and DELETE not listed, del via PATCH return self.delete_user_via_patch(user, uri=uri, data=data) response = self.delete_request(self.root_uri + uri) if not response['ret']: if response.get('status') == 405: # if DELETE returned a 405, try to delete via PATCH return self.delete_user_via_patch(user, uri=uri, data=data) else: return response return {'ret': True} def disable_user(self, user): response = self._find_account_uri(username=user.get('account_username'), acct_id=user.get('account_id')) if not response['ret']: return response uri = response['uri'] data = response['data'] if not data.get('Enabled'): # account already disabled, nothing to do return {'ret': True, 'changed': False} payload = {'Enabled': False} response = self.patch_request(self.root_uri + uri, payload) if response['ret'] is False: return response return {'ret': True} def update_user_role(self, user): if not user.get('account_roleid'): return {'ret': False, 'msg': 'Must provide account_roleid for UpdateUserRole command'} response = 
self._find_account_uri(username=user.get('account_username'), acct_id=user.get('account_id')) if not response['ret']: return response uri = response['uri'] data = response['data'] if data.get('RoleId') == user.get('account_roleid'): # account already has RoleId , nothing to do return {'ret': True, 'changed': False} payload = {'RoleId': user.get('account_roleid')} response = self.patch_request(self.root_uri + uri, payload) if response['ret'] is False: return response return {'ret': True} def update_user_password(self, user): response = self._find_account_uri(username=user.get('account_username'), acct_id=user.get('account_id')) if not response['ret']: return response uri = response['uri'] payload = {'Password': user['account_password']} response = self.patch_request(self.root_uri + uri, payload) if response['ret'] is False: return response return {'ret': True} def get_sessions(self): result = {} # listing all users has always been slower than other operations, why? session_list = [] sessions_results = [] # Get these entries, but does not fail if not found properties = ['Description', 'Id', 'Name', 'UserName'] response = self.get_request(self.root_uri + self.sessions_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for sessions in data[u'Members']: session_list.append(sessions[u'@odata.id']) # session_list[] are URIs # for each session, get details for uri in session_list: session = {} response = self.get_request(self.root_uri + uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: session[property] = data[property] sessions_results.append(session) result["entries"] = sessions_results return result def get_firmware_update_capabilities(self): result = {} response = self.get_request(self.root_uri + self.update_uri) if response['ret'] is False: return response result['ret'] = True result['entries'] = {} data = response['data'] if "Actions" in data: actions 
= data['Actions'] if len(actions) > 0: for key in actions.keys(): action = actions.get(key) if 'title' in action: title = action['title'] else: title = key result['entries'][title] = action.get('[email protected]', ["Key [email protected] not found"]) else: return {'ret': "False", 'msg': "Actions list is empty."} else: return {'ret': "False", 'msg': "Key Actions not found."} return result def get_firmware_inventory(self): result = {} response = self.get_request(self.root_uri + self.firmware_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] result['entries'] = [] for device in data[u'Members']: uri = self.root_uri + device[u'@odata.id'] # Get details for each device response = self.get_request(uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] firmware = {} # Get these standard properties if present for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', 'ReleaseDate']: if key in data: firmware[key] = data.get(key) result['entries'].append(firmware) return result def get_bios_attributes(self, systems_uri): result = {} bios_attributes = {} key = "Bios" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} bios_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for attribute in data[u'Attributes'].items(): bios_attributes[attribute[0]] = attribute[1] result["entries"] = bios_attributes return result def get_multi_bios_attributes(self): return self.aggregate(self.get_bios_attributes) def get_boot_order(self, systems_uri): result = {} # Get these entries from BootOption, if present 
properties = ['DisplayName', 'BootOptionReference'] # Retrieve System resource response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] # Confirm needed Boot properties are present if 'Boot' not in data or 'BootOrder' not in data['Boot']: return {'ret': False, 'msg': "Key BootOrder not found"} boot = data['Boot'] boot_order = boot['BootOrder'] # Retrieve BootOptions if present if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']: boot_options_uri = boot['BootOptions']["@odata.id"] # Get BootOptions resource response = self.get_request(self.root_uri + boot_options_uri) if response['ret'] is False: return response data = response['data'] # Retrieve Members array if 'Members' not in data: return {'ret': False, 'msg': "Members not found in BootOptionsCollection"} members = data['Members'] else: members = [] # Build dict of BootOptions keyed by BootOptionReference boot_options_dict = {} for member in members: if '@odata.id' not in member: return {'ret': False, 'msg': "@odata.id not found in BootOptions"} boot_option_uri = member['@odata.id'] response = self.get_request(self.root_uri + boot_option_uri) if response['ret'] is False: return response data = response['data'] if 'BootOptionReference' not in data: return {'ret': False, 'msg': "BootOptionReference not found in BootOption"} boot_option_ref = data['BootOptionReference'] # fetch the props to display for this boot device boot_props = {} for prop in properties: if prop in data: boot_props[prop] = data[prop] boot_options_dict[boot_option_ref] = boot_props # Build boot device list boot_device_list = [] for ref in boot_order: boot_device_list.append( boot_options_dict.get(ref, {'BootOptionReference': ref})) result["entries"] = boot_device_list return result def get_multi_boot_order(self): return self.aggregate(self.get_boot_order) def get_boot_override(self, systems_uri): result = {} properties = 
["BootSourceOverrideEnabled", "BootSourceOverrideTarget", "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "[email protected]"] response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if 'Boot' not in data: return {'ret': False, 'msg': "Key Boot not found"} boot = data['Boot'] boot_overrides = {} if "BootSourceOverrideEnabled" in boot: if boot["BootSourceOverrideEnabled"] is not False: for property in properties: if property in boot: if boot[property] is not None: boot_overrides[property] = boot[property] else: return {'ret': False, 'msg': "No boot override is enabled."} result['entries'] = boot_overrides return result def get_multi_boot_override(self): return self.aggregate(self.get_boot_override) def set_bios_default_settings(self): result = {} key = "Bios" # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} bios_uri = data[key]["@odata.id"] # Extract proper URI response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"] response = self.post_request(self.root_uri + reset_bios_settings_uri, {}) if response['ret'] is False: return response return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"} def set_one_time_boot_device(self, bootdevice, uefi_target, boot_next): result = {} key = "Boot" if not bootdevice: return {'ret': False, 'msg': "bootdevice option required for SetOneTimeBoot"} # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response result['ret'] = True data = 
response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} boot = data[key] annotation = '[email protected]' if annotation in boot: allowable_values = boot[annotation] if isinstance(allowable_values, list) and bootdevice not in allowable_values: return {'ret': False, 'msg': "Boot device %s not in list of allowable values (%s)" % (bootdevice, allowable_values)} # read existing values enabled = boot.get('BootSourceOverrideEnabled') target = boot.get('BootSourceOverrideTarget') cur_uefi_target = boot.get('UefiTargetBootSourceOverride') cur_boot_next = boot.get('BootNext') if bootdevice == 'UefiTarget': if not uefi_target: return {'ret': False, 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} if enabled == 'Once' and target == bootdevice and uefi_target == cur_uefi_target: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': 'Once', 'BootSourceOverrideTarget': bootdevice, 'UefiTargetBootSourceOverride': uefi_target } } elif bootdevice == 'UefiBootNext': if not boot_next: return {'ret': False, 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} if enabled == 'Once' and target == bootdevice and boot_next == cur_boot_next: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': 'Once', 'BootSourceOverrideTarget': bootdevice, 'BootNext': boot_next } } else: if enabled == 'Once' and target == bootdevice: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': 'Once', 'BootSourceOverrideTarget': bootdevice } } response = self.patch_request(self.root_uri + self.systems_uris[0], payload) if response['ret'] is False: return response return {'ret': True, 'changed': True} def set_bios_attributes(self, attr): result = {} key = "Bios" # Search for 'key' 
entry and extract URI from it response = self.get_request(self.root_uri + self.systems_uris[0]) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} bios_uri = data[key]["@odata.id"] # Extract proper URI response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] # First, check if BIOS attribute exists if attr['bios_attr_name'] not in data[u'Attributes']: return {'ret': False, 'msg': "BIOS attribute not found"} # Find out if value is already set to what we want. If yes, return if data[u'Attributes'][attr['bios_attr_name']] == attr['bios_attr_value']: return {'ret': True, 'changed': False, 'msg': "BIOS attribute already set"} set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] # Example: bios_attr = {\"name\":\"value\"} bios_attr = "{\"" + attr['bios_attr_name'] + "\":\"" + attr['bios_attr_value'] + "\"}" payload = {"Attributes": json.loads(bios_attr)} response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"} def get_chassis_inventory(self): result = {} chassis_results = [] # Get these entries, but does not fail if not found properties = ['ChassisType', 'PartNumber', 'AssetTag', 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model'] # Go through list for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] chassis_result = {} for property in properties: if property in data: chassis_result[property] = data[property] chassis_results.append(chassis_result) result["entries"] = chassis_results return result def get_fan_inventory(self): result = {} fan_results = [] key = "Thermal" # Get these 
entries, but does not fail if not found properties = ['FanName', 'Reading', 'ReadingUnits', 'Status'] # Go through list for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key in data: # match: found an entry for "Thermal" information = fans thermal_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + thermal_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for device in data[u'Fans']: fan = {} for property in properties: if property in device: fan[property] = device[property] fan_results.append(fan) result["entries"] = fan_results return result def get_chassis_power(self): result = {} key = "Power" # Get these entries, but does not fail if not found properties = ['Name', 'PowerAllocatedWatts', 'PowerAvailableWatts', 'PowerCapacityWatts', 'PowerConsumedWatts', 'PowerMetrics', 'PowerRequestedWatts', 'RelatedItem', 'Status'] chassis_power_results = [] # Go through list for chassis_uri in self.chassis_uri_list: chassis_power_result = {} response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key in data: response = self.get_request(self.root_uri + data[key]['@odata.id']) data = response['data'] if 'PowerControl' in data: if len(data['PowerControl']) > 0: data = data['PowerControl'][0] for property in properties: if property in data: chassis_power_result[property] = data[property] else: return {'ret': False, 'msg': 'Key PowerControl not found.'} chassis_power_results.append(chassis_power_result) else: return {'ret': False, 'msg': 'Key Power not found.'} result['entries'] = chassis_power_results return result def get_chassis_thermals(self): result = {} sensors = [] key = "Thermal" # Get these entries, but does not fail if not found properties = ['Name', 'PhysicalContext', 
'UpperThresholdCritical', 'UpperThresholdFatal', 'UpperThresholdNonCritical', 'LowerThresholdCritical', 'LowerThresholdFatal', 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', 'SensorNumber'] # Go through list for chassis_uri in self.chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key in data: thermal_uri = data[key]["@odata.id"] response = self.get_request(self.root_uri + thermal_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if "Temperatures" in data: for sensor in data[u'Temperatures']: sensor_result = {} for property in properties: if property in sensor: if sensor[property] is not None: sensor_result[property] = sensor[property] sensors.append(sensor_result) if sensors is None: return {'ret': False, 'msg': 'Key Temperatures was not found.'} result['entries'] = sensors return result def get_cpu_inventory(self, systems_uri): result = {} cpu_list = [] cpu_results = [] key = "Processors" # Get these entries, but does not fail if not found properties = ['Id', 'Manufacturer', 'Model', 'MaxSpeedMHz', 'TotalCores', 'TotalThreads', 'Status'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} processors_uri = data[key]["@odata.id"] # Get a list of all CPUs and build respective URIs response = self.get_request(self.root_uri + processors_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for cpu in data[u'Members']: cpu_list.append(cpu[u'@odata.id']) for c in cpu_list: cpu = {} uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = 
response['data'] for property in properties: if property in data: cpu[property] = data[property] cpu_results.append(cpu) result["entries"] = cpu_results return result def get_multi_cpu_inventory(self): return self.aggregate(self.get_cpu_inventory) def get_memory_inventory(self, systems_uri): result = {} memory_list = [] memory_results = [] key = "Memory" # Get these entries, but does not fail if not found properties = ['SerialNumber', 'MemoryDeviceType', 'PartNuber', 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} memory_uri = data[key]["@odata.id"] # Get a list of all DIMMs and build respective URIs response = self.get_request(self.root_uri + memory_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for dimm in data[u'Members']: memory_list.append(dimm[u'@odata.id']) for m in memory_list: dimm = {} uri = self.root_uri + m response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] if "Status" in data: if "State" in data["Status"]: if data["Status"]["State"] == "Absent": continue else: continue for property in properties: if property in data: dimm[property] = data[property] memory_results.append(dimm) result["entries"] = memory_results return result def get_multi_memory_inventory(self): return self.aggregate(self.get_memory_inventory) def get_nic_inventory(self, resource_uri): result = {} nic_list = [] nic_results = [] key = "EthernetInterfaces" # Get these entries, but does not fail if not found properties = ['Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', 'NameServers', 'MACAddress', 'PermanentMACAddress', 'SpeedMbps', 'MTUSize', 'AutoNeg', 
'Status'] response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} ethernetinterfaces_uri = data[key]["@odata.id"] # Get a list of all network controllers and build respective URIs response = self.get_request(self.root_uri + ethernetinterfaces_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for nic in data[u'Members']: nic_list.append(nic[u'@odata.id']) for n in nic_list: nic = {} uri = self.root_uri + n response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: nic[property] = data[property] nic_results.append(nic) result["entries"] = nic_results return result def get_multi_nic_inventory(self, resource_type): ret = True entries = [] # Given resource_type, use the proper URI if resource_type == 'Systems': resource_uris = self.systems_uris elif resource_type == 'Manager': # put in a list to match what we're doing with systems_uris resource_uris = [self.manager_uri] for resource_uri in resource_uris: inventory = self.get_nic_inventory(resource_uri) ret = inventory.pop('ret') and ret if 'entries' in inventory: entries.append(({'resource_uri': resource_uri}, inventory['entries'])) return dict(ret=ret, entries=entries) def get_virtualmedia(self, resource_uri): result = {} virtualmedia_list = [] virtualmedia_results = [] key = "VirtualMedia" # Get these entries, but does not fail if not found properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes', 'Image', 'ImageName', 'Name', 'WriteProtected', 'TransferMethod', 'TransferProtocolType'] response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} 
virtualmedia_uri = data[key]["@odata.id"] # Get a list of all virtual media and build respective URIs response = self.get_request(self.root_uri + virtualmedia_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for virtualmedia in data[u'Members']: virtualmedia_list.append(virtualmedia[u'@odata.id']) for n in virtualmedia_list: virtualmedia = {} uri = self.root_uri + n response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] for property in properties: if property in data: virtualmedia[property] = data[property] virtualmedia_results.append(virtualmedia) result["entries"] = virtualmedia_results return result def get_multi_virtualmedia(self): ret = True entries = [] # Because _find_managers_resource() only find last Manager uri in self.manager_uri, not one list. This should be 1 issue. # I have to put manager_uri into list to reduce future changes when the issue is fixed. resource_uris = [self.manager_uri] for resource_uri in resource_uris: virtualmedia = self.get_virtualmedia(resource_uri) ret = virtualmedia.pop('ret') and ret if 'entries' in virtualmedia: entries.append(({'resource_uri': resource_uri}, virtualmedia['entries'])) return dict(ret=ret, entries=entries) def get_psu_inventory(self): result = {} psu_list = [] psu_results = [] key = "PowerSupplies" # Get these entries, but does not fail if not found properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer', 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType', 'Status'] # Get a list of all Chassis and build URIs, then get all PowerSupplies # from each Power entry in the Chassis chassis_uri_list = self.chassis_uri_list for chassis_uri in chassis_uri_list: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] if 'Power' in data: power_uri = data[u'Power'][u'@odata.id'] else: continue response = 
self.get_request(self.root_uri + power_uri) data = response['data'] if key not in data: return {'ret': False, 'msg': "Key %s not found" % key} psu_list = data[key] for psu in psu_list: psu_not_present = False psu_data = {} for property in properties: if property in psu: if psu[property] is not None: if property == 'Status': if 'State' in psu[property]: if psu[property]['State'] == 'Absent': psu_not_present = True psu_data[property] = psu[property] if psu_not_present: continue psu_results.append(psu_data) result["entries"] = psu_results if not result["entries"]: return {'ret': False, 'msg': "No PowerSupply objects found"} return result def get_multi_psu_inventory(self): return self.aggregate(self.get_psu_inventory) def get_system_inventory(self, systems_uri): result = {} inventory = {} # Get these entries, but does not fail if not found properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', 'ProcessorSummary', 'TrustedModules'] response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] for property in properties: if property in data: inventory[property] = data[property] result["entries"] = inventory return result def get_multi_system_inventory(self): return self.aggregate(self.get_system_inventory)
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
changelogs/fragments/61805-azure-facts-info.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py
#!/usr/bin/python # # Copyright (c) 2017 Zim Kalinowski, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_containerinstance_info version_added: "2.9" short_description: Get Azure Container Instance facts description: - Get facts of Container Instance. options: resource_group: description: - The name of the resource group. required: True name: description: - The name of the container instance. tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - Zim Kalinowski (@zikalino) ''' EXAMPLES = ''' - name: Get specific Container Instance facts azure_rm_containerinstance_info: resource_group: myResourceGroup name: myContainer - name: List Container Instances in a specified resource group name azure_rm_containerinstance_info: resource_group: myResourceGroup ''' RETURN = ''' container_groups: description: A list of Container Instance dictionaries. returned: always type: complex contains: id: description: - The resource id. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/contain erGroups/myContainer" resource_group: description: - Resource group where the container exists. returned: always type: str sample: testrg name: description: - The resource name. returned: always type: str sample: mycontainers location: description: - The resource location. returned: always type: str sample: westus os_type: description: - The OS type of containers. returned: always type: str sample: linux ip_address: description: - IP address of the container instance. 
returned: always type: str sample: 173.15.18.1 dns_name_label: description: - The Dns name label for the IP. returned: always type: str sample: mydomain ports: description: - List of ports exposed by the container instance. returned: always type: list sample: [ 80, 81 ] containers: description: - The containers within the container group. returned: always type: complex sample: containers contains: name: description: - The name of the container instance. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance /containerGroups/myContainer" image: description: - The container image name. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance /containerGroups/myContainer" memory: description: - The required memory of the containers in GB. returned: always type: float sample: 1.5 cpu: description: - The required number of CPU cores of the containers. returned: always type: int sample: 1 ports: description: - List of ports exposed within the container group. returned: always type: list sample: [ 80, 81 ] commands: description: - List of commands to execute within the container instance in exec form. returned: always type: list sample: [ "pip install abc" ] environment_variables: description: - List of container environment variables. type: complex contains: name: description: - Environment variable name. type: str value: description: - Environment variable value. type: str tags: description: Tags assigned to the resource. Dictionary of string:string pairs. 
type: dict sample: { "tag1": "abc" } ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase from ansible.module_utils.common.dict_transformations import _camel_to_snake try: from msrestazure.azure_exceptions import CloudError from msrestazure.azure_operation import AzureOperationPoller from azure.mgmt.containerinstance import ContainerInstanceManagementClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class AzureRMContainerInstanceInfo(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), name=dict( type='str' ), tags=dict( type='list' ) ) # store the results of the module operation self.results = dict( changed=False, ansible_facts=dict() ) self.resource_group = None self.name = None super(AzureRMContainerInstanceInfo, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_containerinstance_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_containerinstance_facts' module has been renamed to 'azure_rm_containerinstance_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if (self.name is not None): self.results['containerinstances'] = self.get() elif (self.resource_group is not None): self.results['containerinstances'] = self.list_by_resource_group() else: self.results['containerinstances'] = self.list_all() return self.results def get(self): response = None results = [] try: response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, container_group_name=self.name) self.log("Response : {0}".format(response)) except CloudError as e: self.log('Could not get facts for Container Instances.') if response is not None and self.has_tags(response.tags, self.tags): results.append(self.format_item(response)) return results def 
list_by_resource_group(self): response = None results = [] try: response = self.containerinstance_client.container_groups.list_by_resource_group(resource_group_name=self.resource_group) self.log("Response : {0}".format(response)) except CloudError as e: self.fail('Could not list facts for Container Instances.') if response is not None: for item in response: if self.has_tags(item.tags, self.tags): results.append(self.format_item(item)) return results def list_all(self): response = None results = [] try: response = self.containerinstance_client.container_groups.list() self.log("Response : {0}".format(response)) except CloudError as e: self.fail('Could not list facts for Container Instances.') if response is not None: for item in response: if self.has_tags(item.tags, self.tags): results.append(self.format_item(item)) return results def format_item(self, item): d = item.as_dict() containers = d['containers'] ports = d['ip_address']['ports'] if 'ip_address' in d else [] resource_group = d['id'].split('resourceGroups/')[1].split('/')[0] for port_index in range(len(ports)): ports[port_index] = ports[port_index]['port'] for container_index in range(len(containers)): old_container = containers[container_index] new_container = { 'name': old_container['name'], 'image': old_container['image'], 'memory': old_container['resources']['requests']['memory_in_gb'], 'cpu': old_container['resources']['requests']['cpu'], 'ports': [], 'commands': old_container.get('command'), 'environment_variables': old_container.get('environment_variables') } for port_index in range(len(old_container['ports'])): new_container['ports'].append(old_container['ports'][port_index]['port']) containers[container_index] = new_container d = { 'id': d['id'], 'resource_group': resource_group, 'name': d['name'], 'os_type': d['os_type'], 'dns_name_label': d['ip_address'].get('dns_name_label'), 'ip_address': d['ip_address']['ip'] if 'ip_address' in d else '', 'ports': ports, 'location': d['location'], 'containers': 
containers, 'restart_policy': _camel_to_snake(d.get('restart_policy')) if d.get('restart_policy') else None, 'tags': d.get('tags', None) } return d def main(): AzureRMContainerInstanceInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py
#!/usr/bin/python # # Copyright (c) 2017 Obezimnaka Boms, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_dnsrecordset_info version_added: "2.9" short_description: Get DNS Record Set facts description: - Get facts for a specific DNS Record Set in a Zone, or a specific type in all Zones or in one Zone etc. options: relative_name: description: - Only show results for a Record Set. resource_group: description: - Limit results by resource group. Required when filtering by name or type. zone_name: description: - Limit results by zones. Required when filtering by name or type. record_type: description: - Limit record sets by record type. top: description: - Limit the maximum number of record sets to return. type: int extends_documentation_fragment: - azure - azure_tags author: - Ozi Boms (@ozboms) ''' EXAMPLES = ''' - name: Get facts for one Record Set azure_rm_dnsrecordset_info: resource_group: myResourceGroup zone_name: example.com relative_name: server10 record_type: A - name: Get facts for all Type A Record Sets in a Zone azure_rm_dnsrecordset_info: resource_group: myResourceGroup zone_name: example.com record_type: A - name: Get all record sets in one zone azure_rm_dnsrecordset_info: resource_group: myResourceGroup zone_name: example.com ''' RETURN = ''' azure_dnsrecordset: description: - List of record set dicts. 
returned: always type: list example: [ { "etag": "60ac0480-44dd-4881-a2ed-680d20b3978e", "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone.com/A/servera", "name": "servera", "properties": { "ARecords": [ { "ipv4Address": "10.4.5.7" }, { "ipv4Address": "2.4.5.8" } ], "TTL": 12900 }, "type": "Microsoft.Network/dnszones/A" }] dnsrecordsets: description: - List of record set dicts, which shares the same hierarchy as M(azure_rm_dnsrecordset) module's parameter. returned: always type: list contains: id: description: - ID of the dns recordset. sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone. com/A/servera" relative_name: description: - Name of the dns recordset. sample: servera record_type: description: - The type of the record set. - Can be C(A), C(AAAA), C(CNAME), C(MX), C(NS), C(SRV), C(TXT), C(PTR). sample: A time_to_live: description: - Time to live of the record set in seconds. sample: 12900 records: description: - List of records depending on the type of recordset. sample: [ { "ipv4Address": "10.4.5.7" }, { "ipv4Address": "2.4.5.8" } ] provisioning_state: description: - Provision state of the resource. sample: Successed fqdn: description: - Fully qualified domain name of the record set. 
sample: www.newzone.com ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from azure.common import AzureMissingResourceHttpError, AzureHttpError except Exception: # This is handled in azure_rm_common pass AZURE_OBJECT_CLASS = 'RecordSet' RECORDSET_VALUE_MAP = dict( A='arecords', AAAA='aaaa_records', CNAME='cname_record', MX='mx_records', NS='ns_records', PTR='ptr_records', SRV='srv_records', TXT='txt_records', SOA='soa_record', CAA='caa_records' # FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py ) class AzureRMRecordSetInfo(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( relative_name=dict(type='str'), resource_group=dict(type='str'), zone_name=dict(type='str'), record_type=dict(type='str'), top=dict(type='int') ) # store the results of the module operation self.results = dict( changed=False, ansible_info=dict(azure_dnsrecordset=[]) ) self.relative_name = None self.resource_group = None self.zone_name = None self.record_type = None self.top = None super(AzureRMRecordSetInfo, self).__init__(self.module_arg_spec) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_dnsrecordset_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_dnsrecordset_facts' module has been renamed to 'azure_rm_dnsrecordset_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if not self.top or self.top <= 0: self.top = None # create conditionals to catch errors when calling record facts if self.relative_name and not self.resource_group: self.fail("Parameter error: resource group required when filtering by name or record type.") if self.relative_name and not self.zone_name: self.fail("Parameter error: DNS Zone required when filtering by name or record type.") results = [] # list the conditions 
for what to return based on input if self.relative_name is not None: # if there is a name listed, they want only facts about that specific Record Set itself results = self.get_item() elif self.record_type: # else, they just want all the record sets of a specific type results = self.list_type() elif self.zone_name: # if there is a zone name listed, then they want all the record sets in a zone results = self.list_zone() if is_old_facts: self.results['ansible_facts']['azure_dnsrecordset'] = self.serialize_list(results) self.results['dnsrecordsets'] = self.curated_list(results) return self.results def get_item(self): self.log('Get properties for {0}'.format(self.relative_name)) item = None results = [] # try to get information for specific Record Set try: item = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type) except CloudError: pass results = [item] return results def list_type(self): self.log('Lists the record sets of a specified type in a DNS zone') try: response = self.dns_client.record_sets.list_by_type(self.resource_group, self.zone_name, self.record_type, top=self.top) except AzureHttpError as exc: self.fail("Failed to list for record type {0} - {1}".format(self.record_type, str(exc))) results = [] for item in response: results.append(item) return results def list_zone(self): self.log('Lists all record sets in a DNS zone') try: response = self.dns_client.record_sets.list_by_dns_zone(self.resource_group, self.zone_name, top=self.top) except AzureHttpError as exc: self.fail("Failed to list for zone {0} - {1}".format(self.zone_name, str(exc))) results = [] for item in response: results.append(item) return results def serialize_list(self, raws): return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else [] def curated_list(self, raws): return [self.record_to_dict(item) for item in raws] if raws else [] def record_to_dict(self, record): record_type = 
record.type[len('Microsoft.Network/dnszones/'):] records = getattr(record, RECORDSET_VALUE_MAP.get(record_type)) if not isinstance(records, list): records = [records] return dict( id=record.id, relative_name=record.name, record_type=record_type, records=[x.as_dict() for x in records], time_to_live=record.ttl, fqdn=record.fqdn, provisioning_state=record.provisioning_state ) def main(): AzureRMRecordSetInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py
#!/usr/bin/python # # Copyright (c) 2016 Matt Davis, <[email protected]> # Chris Houseknecht, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_resourcegroup_info version_added: "2.1" short_description: Get resource group facts description: - Get facts for a specific resource group or all resource groups. options: name: description: - Limit results to a specific resource group. tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. list_resources: description: - List all resources under the resource group. - Note this will cost network overhead for each resource group. Suggest use this when I(name) set. version_added: "2.8" extends_documentation_fragment: - azure author: - Chris Houseknecht (@chouseknecht) - Matt Davis (@nitzmahone) ''' EXAMPLES = ''' - name: Get facts for one resource group azure_rm_resourcegroup_info: name: myResourceGroup - name: Get facts for all resource groups azure_rm_resourcegroup_info: - name: Get facts by tags azure_rm_resourcegroup_info: tags: - testing - foo:bar - name: Get facts for one resource group including resources it contains azure_rm_resourcegroup_info: name: myResourceGroup list_resources: yes ''' RETURN = ''' azure_resourcegroups: description: - List of resource group dicts. returned: always type: list contains: id: description: - Resource id. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup" name: description: - Resource group name. returned: always type: str sample: foo tags: description: - Tags assigned to resource group. 
returned: always type: dict sample: { "tag": "value" } resources: description: - List of resources under the resource group. returned: when I(list_resources=yes). type: list contains: id: description: - Resource id. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMa chines/myVirtualMachine" name: description: - Resource name. returned: always type: str sample: myVirtualMachine location: description: - Resource region. returned: always type: str sample: eastus type: description: - Resource type. returned: always type: str sample: "Microsoft.Compute/virtualMachines" tags: description: - Tags to assign to the managed disk. returned: always type: dict sample: { "tag": "value" } ''' try: from msrestazure.azure_exceptions import CloudError except Exception: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase AZURE_OBJECT_CLASS = 'ResourceGroup' class AzureRMResourceGroupInfo(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( name=dict(type='str'), tags=dict(type='list'), list_resources=dict(type='bool') ) self.results = dict( changed=False, ansible_facts=dict(azure_resourcegroups=[]), resourcegroups=[] ) self.name = None self.tags = None self.list_resources = None super(AzureRMResourceGroupInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True) def exec_module(self, **kwargs): for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if self.name: self.results['ansible_facts']['azure_resourcegroups'] = self.get_item() else: self.results['ansible_facts']['azure_resourcegroups'] = self.list_items() if self.list_resources: for item in self.results['ansible_facts']['azure_resourcegroups']: item['resources'] = self.list_by_rg(item['name']) self.results['resourcegroups'] = self.results['ansible_facts']['azure_resourcegroups'] return self.results def get_item(self): 
self.log('Get properties for {0}'.format(self.name)) item = None result = [] try: item = self.rm_client.resource_groups.get(self.name) except CloudError: pass if item and self.has_tags(item.tags, self.tags): result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] return result def list_items(self): self.log('List all items') try: response = self.rm_client.resource_groups.list() except CloudError as exc: self.fail("Failed to list all items - {0}".format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) return results def list_by_rg(self, name): self.log('List resources under resource group') results = [] try: response = self.rm_client.resources.list_by_resource_group(name) while True: results.append(response.next().as_dict()) except StopIteration: pass except CloudError as exc: self.fail('Error when listing resources under resource group {0}: {1}'.format(name, exc.message or str(exc))) return results def main(): AzureRMResourceGroupInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py
#!/usr/bin/python # # Copyright (c) 2017 Zim Kalinowski, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_sqlserver_info version_added: "2.9" short_description: Get SQL Server facts description: - Get facts of SQL Server. options: resource_group: description: - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. required: True server_name: description: - The name of the server. extends_documentation_fragment: - azure - azure_tags author: - Zim Kalinowski (@zikalino) ''' EXAMPLES = ''' - name: Get instance of SQL Server azure_rm_sqlserver_info: resource_group: myResourceGroup server_name: server_name - name: List instances of SQL Server azure_rm_sqlserver_info: resource_group: myResourceGroup ''' RETURN = ''' servers: description: - A list of dict results where the key is the name of the SQL Server and the values are the facts for that SQL Server. returned: always type: complex contains: sqlserver_name: description: - The key is the name of the server that the values relate to. type: complex contains: id: description: - Resource ID. returned: always type: str sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645 name: description: - Resource name. returned: always type: str sample: sqlcrudtest-4645 type: description: - Resource type. returned: always type: str sample: Microsoft.Sql/servers location: description: - Resource location. returned: always type: str sample: japaneast kind: description: - Kind of sql server. This is metadata used for the Azure portal experience. 
returned: always type: str sample: v12.0 version: description: - The version of the server. returned: always type: str sample: 12.0 state: description: - The state of the server. returned: always type: str sample: Ready fully_qualified_domain_name: description: - The fully qualified domain name of the server. returned: always type: str sample: fully_qualified_domain_name ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.sql import SqlManagementClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class AzureRMSqlServerInfo(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), server_name=dict( type='str' ) ) # store the results of the module operation self.results = dict( changed=False, ansible_facts=dict() ) self.resource_group = None self.server_name = None super(AzureRMSqlServerInfo, self).__init__(self.module_arg_spec) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_sqlserver_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_sqlserver_facts' module has been renamed to 'azure_rm_sqlserver_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if (self.resource_group is not None and self.server_name is not None): self.results['servers'] = self.get() elif (self.resource_group is not None): self.results['servers'] = self.list_by_resource_group() return self.results def get(self): ''' Gets facts of the specified SQL Server. 
:return: deserialized SQL Serverinstance state dictionary ''' response = None results = {} try: response = self.sql_client.servers.get(resource_group_name=self.resource_group, server_name=self.server_name) self.log("Response : {0}".format(response)) except CloudError as e: self.log('Could not get facts for Servers.') if response is not None: results[response.name] = response.as_dict() return results def list_by_resource_group(self): ''' Gets facts of the specified SQL Server. :return: deserialized SQL Serverinstance state dictionary ''' response = None results = {} try: response = self.sql_client.servers.list_by_resource_group(resource_group_name=self.resource_group) self.log("Response : {0}".format(response)) except CloudError as e: self.log('Could not get facts for Servers.') if response is not None: for item in response: results[item.name] = item.as_dict() return results def main(): AzureRMSqlServerInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py
#!/usr/bin/python # # Copyright (c) 2016 Matt Davis, <[email protected]> # Chris Houseknecht, <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_storageaccount_info version_added: "2.9" short_description: Get storage account facts description: - Get facts for one storage account or all storage accounts within a resource group. options: name: description: - Only show results for a specific account. resource_group: description: - Limit results to a resource group. Required when filtering by name. aliases: - resource_group_name tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. show_connection_string: description: - Show the connection string for each of the storageaccount's endpoints. - For convenient usage, C(show_connection_string) will also show the access keys for each of the storageaccount's endpoints. - Note that it will cost a lot of time when list all storageaccount rather than query a single one. type: bool version_added: "2.8" show_blob_cors: description: - Show the blob CORS settings for each blob related to the storage account. - Querying all storage accounts will take a long time. 
type: bool version_added: "2.8" extends_documentation_fragment: - azure author: - Chris Houseknecht (@chouseknecht) - Matt Davis (@nitzmahone) ''' EXAMPLES = ''' - name: Get facts for one account azure_rm_storageaccount_info: resource_group: myResourceGroup name: clh0002 - name: Get facts for all accounts in a resource group azure_rm_storageaccount_info: resource_group: myResourceGroup - name: Get facts for all accounts by tags azure_rm_storageaccount_info: tags: - testing - foo:bar ''' RETURN = ''' azure_storageaccounts: description: - List of storage account dicts. returned: always type: list example: [{ "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/myResourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001", "location": "eastus2", "name": "testaccount001", "properties": { "accountType": "Standard_LRS", "creationTime": "2016-03-28T02:46:58.290113Z", "primaryEndpoints": { "blob": "https://testaccount001.blob.core.windows.net/", "file": "https://testaccount001.file.core.windows.net/", "queue": "https://testaccount001.queue.core.windows.net/", "table": "https://testaccount001.table.core.windows.net/" }, "primaryLocation": "eastus2", "provisioningState": "Succeeded", "statusOfPrimary": "Available" }, "tags": {}, "type": "Microsoft.Storage/storageAccounts" }] storageaccounts: description: - List of storage account dicts in resource module's parameter format. returned: always type: complex contains: id: description: - Resource ID. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/t estaccount001" name: description: - Name of the storage account to update or create. returned: always type: str sample: testaccount001 location: description: - Valid Azure location. Defaults to location of the resource group. returned: always type: str sample: eastus account_type: description: - Type of storage account. 
- C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types. - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS). returned: always type: str sample: Standard_ZRS custom_domain: description: - User domain assigned to the storage account. - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source. returned: always type: complex contains: name: description: - CNAME source. returned: always type: str sample: testaccount use_sub_domain: description: - Whether to use sub domain. returned: always type: bool sample: true kind: description: - The kind of storage. returned: always type: str sample: Storage access_tier: description: - The access tier for this storage account. returned: always type: str sample: Hot https_only: description: - Allows https traffic only to storage service when set to C(true). returned: always type: bool sample: false provisioning_state: description: - The status of the storage account at the time the operation was called. - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded). returned: always type: str sample: Succeeded secondary_location: description: - The location of the geo-replicated secondary for the storage account. - Only available if the I(account_type=Standard_GRS) or I(account_type=Standard_RAGRS). returned: always type: str sample: westus status_of_primary: description: - Status of the primary location of the storage account; either C(available) or C(unavailable). returned: always type: str sample: available status_of_secondary: description: - Status of the secondary location of the storage account; either C(available) or C(unavailable). returned: always type: str sample: available primary_location: description: - The location of the primary data center for the storage account. returned: always type: str sample: eastus primary_endpoints: description: - URLs to retrieve a public I(blob), I(queue), or I(table) object. 
- Note that C(Standard_ZRS) and C(Premium_LRS) accounts only return the blob endpoint. returned: always type: complex contains: blob: description: - The primary blob endpoint and connection string. returned: always type: complex contains: endpoint: description: - The primary blob endpoint. returned: always type: str sample: "https://testaccount001.blob.core.windows.net/" connectionstring: description: - Connectionstring of the blob endpoint. returned: always type: str sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X" queue: description: - The primary queue endpoint and connection string. returned: always type: complex contains: endpoint: description: - The primary queue endpoint. returned: always type: str sample: "https://testaccount001.queue.core.windows.net/" connectionstring: description: - Connectionstring of the queue endpoint. returned: always type: str sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X" table: description: - The primary table endpoint and connection string. returned: always type: complex contains: endpoint: description: - The primary table endpoint. returned: always type: str sample: "https://testaccount001.table.core.windows.net/" connectionstring: description: - Connectionstring of the table endpoint. returned: always type: str sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X" key: description: - The account key for the primary_endpoints returned: always type: str sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx secondary_endpoints: description: - The URLs to retrieve a public I(blob), I(queue), or I(table) object from the secondary location. - Only available if the SKU I(name=Standard_RAGRS). 
returned: always type: complex contains: blob: description: - The secondary blob endpoint and connection string. returned: always type: complex contains: endpoint: description: - The secondary blob endpoint. returned: always type: str sample: "https://testaccount001.blob.core.windows.net/" connectionstring: description: - Connectionstring of the blob endpoint. returned: always type: str sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X" queue: description: - The secondary queue endpoint and connection string. returned: always type: complex contains: endpoint: description: - The secondary queue endpoint. returned: always type: str sample: "https://testaccount001.queue.core.windows.net/" connectionstring: description: - Connectionstring of the queue endpoint. returned: always type: str sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X" table: description: - The secondary table endpoint and connection string. returned: always type: complex contains: endpoint: description: - The secondary table endpoint. returned: always type: str sample: "https://testaccount001.table.core.windows.net/" connectionstring: description: - Connectionstring of the table endpoint. returned: always type: str sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X" key: description: - The account key for the secondary_endpoints sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx tags: description: - Resource tags. 
returned: always type: dict sample: { "tag1": "abc" } ''' try: from msrestazure.azure_exceptions import CloudError except Exception: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase from ansible.module_utils._text import to_native AZURE_OBJECT_CLASS = 'StorageAccount' class AzureRMStorageAccountInfo(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( name=dict(type='str'), resource_group=dict(type='str', aliases=['resource_group_name']), tags=dict(type='list'), show_connection_string=dict(type='bool'), show_blob_cors=dict(type='bool') ) self.results = dict( changed=False, ansible_facts=dict(azure_storageaccounts=[]), storageaccounts=[] ) self.name = None self.resource_group = None self.tags = None self.show_connection_string = None self.show_blob_cors = None super(AzureRMStorageAccountInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_storageaccount_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_storageaccount_facts' module has been renamed to 'azure_rm_storageaccount_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if self.name and not self.resource_group: self.fail("Parameter error: resource group required when filtering by name.") results = [] if self.name: results = self.get_account() elif self.resource_group: results = self.list_resource_group() else: results = self.list_all() filtered = self.filter_tag(results) if is_old_facts: self.results['ansible_facts']['azure_storageaccounts'] = self.serialize(filtered) self.results['ansible_facts']['storageaccounts'] = self.format_to_dict(filtered) self.results['storageaccounts'] = self.format_to_dict(filtered) return self.results def get_account(self): self.log('Get properties for account {0}'.format(self.name)) account = None try: account = 
self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) return [account] except CloudError: pass return [] def list_resource_group(self): self.log('List items') try: response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group) except Exception as exc: self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc))) return response def list_all(self): self.log('List all items') try: response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group) except Exception as exc: self.fail("Error listing all items - {0}".format(str(exc))) return response def filter_tag(self, raw): return [item for item in raw if self.has_tags(item.tags, self.tags)] def serialize(self, raw): return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raw] def format_to_dict(self, raw): return [self.account_obj_to_dict(item) for item in raw] def account_obj_to_dict(self, account_obj, blob_service_props=None): account_dict = dict( id=account_obj.id, name=account_obj.name, location=account_obj.location, access_tier=(account_obj.access_tier.value if account_obj.access_tier is not None else None), account_type=account_obj.sku.name.value, kind=account_obj.kind.value if account_obj.kind else None, provisioning_state=account_obj.provisioning_state.value, secondary_location=account_obj.secondary_location, status_of_primary=(account_obj.status_of_primary.value if account_obj.status_of_primary is not None else None), status_of_secondary=(account_obj.status_of_secondary.value if account_obj.status_of_secondary is not None else None), primary_location=account_obj.primary_location, https_only=account_obj.enable_https_traffic_only ) id_dict = self.parse_resource_to_dict(account_obj.id) account_dict['resource_group'] = id_dict.get('resource_group') account_key = self.get_connectionstring(account_dict['resource_group'], account_dict['name']) account_dict['custom_domain'] = None if 
account_obj.custom_domain: account_dict['custom_domain'] = dict( name=account_obj.custom_domain.name, use_sub_domain=account_obj.custom_domain.use_sub_domain ) account_dict['primary_endpoints'] = None if account_obj.primary_endpoints: account_dict['primary_endpoints'] = dict( blob=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.blob, 'blob'), queue=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.queue, 'queue'), table=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.table, 'table') ) if account_key[0]: account_dict['primary_endpoints']['key'] = '{0}'.format(account_key[0]) account_dict['secondary_endpoints'] = None if account_obj.secondary_endpoints: account_dict['secondary_endpoints'] = dict( blob=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.blob, 'blob'), queue=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.queue, 'queue'), table=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.table, 'table'), ) if account_key[1]: account_dict['secondary_endpoints']['key'] = '{0}'.format(account_key[1]) account_dict['tags'] = None if account_obj.tags: account_dict['tags'] = account_obj.tags blob_service_props = self.get_blob_service_props(account_dict['resource_group'], account_dict['name']) if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules: account_dict['blob_cors'] = [dict( allowed_origins=to_native(x.allowed_origins), allowed_methods=to_native(x.allowed_methods), max_age_in_seconds=x.max_age_in_seconds, exposed_headers=to_native(x.exposed_headers), allowed_headers=to_native(x.allowed_headers) ) for x in blob_service_props.cors.cors_rules] return account_dict def format_endpoint_dict(self, name, key, endpoint, storagetype, protocol='https'): result = 
dict(endpoint=endpoint) if key: result['connectionstring'] = 'DefaultEndpointsProtocol={0};EndpointSuffix={1};AccountName={2};AccountKey={3};{4}Endpoint={5}'.format( protocol, self._cloud_environment.suffixes.storage_endpoint, name, key, str.title(storagetype), endpoint) return result def get_blob_service_props(self, resource_group, name): if not self.show_blob_cors: return None try: blob_service_props = self.storage_client.blob_services.get_service_properties(resource_group, name) return blob_service_props except Exception: pass return None def get_connectionstring(self, resource_group, name): keys = ['', ''] if not self.show_connection_string: return keys try: cred = self.storage_client.storage_accounts.list_keys(resource_group, name) # get the following try catch from CLI try: keys = [cred.keys[0].value, cred.keys[1].value] except AttributeError: keys = [cred.key1, cred.key2] except Exception: pass return keys def main(): AzureRMStorageAccountInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py
#!/usr/bin/python # # Copyright (c) 2016 Matt Davis, <[email protected]> # Chris Houseknecht, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_virtualmachineimage_info version_added: "2.9" short_description: Get virtual machine image facts description: - Get facts for virtual machine images. options: location: description: - Azure location value, for example C(westus), C(eastus), C(eastus2), C(northcentralus), etc. - Supplying only a location value will yield a list of available publishers for the location. required: true publisher: description: - Name of an image publisher. List image offerings associated with a particular publisher. offer: description: - Name of an image offering. Combine with SKU to see a list of available image versions. sku: description: - Image offering SKU. Combine with offer to see a list of available versions. version: description: - Specific version number of an image. extends_documentation_fragment: - azure author: - Chris Houseknecht (@chouseknecht) - Matt Davis (@nitzmahone) ''' EXAMPLES = ''' - name: Get facts for a specific image azure_rm_virtualmachineimage_info: location: eastus publisher: OpenLogic offer: CentOS sku: '7.1' version: '7.1.20160308' - name: List available versions azure_rm_virtualmachineimage_info: location: eastus publisher: OpenLogic offer: CentOS sku: '7.1' - name: List available offers azure_rm_virtualmachineimage_info: location: eastus publisher: OpenLogic - name: List available publishers azure_rm_virtualmachineimage_info: location: eastus ''' RETURN = ''' azure_vmimages: description: - List of image dicts. 
returned: always type: list example: [ { "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150410", "location": "eastus", "name": "7.1.20150410" }, { "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150605", "location": "eastus", "name": "7.1.20150605" }, { "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150731", "location": "eastus", "name": "7.1.20150731" }, { "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20160308", "location": "eastus", "name": "7.1.20160308" } ] ''' try: from msrestazure.azure_exceptions import CloudError except Exception: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] class AzureRMVirtualMachineImageInfo(AzureRMModuleBase): def __init__(self, **kwargs): self.module_arg_spec = dict( location=dict(type='str', required=True), publisher=dict(type='str'), offer=dict(type='str'), sku=dict(type='str'), version=dict(type='str') ) self.results = dict( changed=False, ansible_facts=dict(azure_vmimages=[]) ) self.location = None self.publisher = None self.offer = None self.sku = None self.version = None super(AzureRMVirtualMachineImageInfo, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_virtualmachineimage_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_virtualmachineimage_facts' module has been renamed to 'azure_rm_virtualmachineimage_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, 
kwargs[key]) if is_old_facts: if self.location and self.publisher and self.offer and self.sku and self.version: self.results['ansible_facts']['azure_vmimages'] = self.get_item() elif self.location and self.publisher and self.offer and self.sku: self.results['ansible_facts']['azure_vmimages'] = self.list_images() elif self.location and self.publisher: self.results['ansible_facts']['azure_vmimages'] = self.list_offers() elif self.location: self.results['ansible_facts']['azure_vmimages'] = self.list_publishers() else: if self.location and self.publisher and self.offer and self.sku and self.version: self.results['vmimages'] = self.get_item() elif self.location and self.publisher and self.offer and self.sku: self.results['vmimages'] = self.list_images() elif self.location and self.publisher: self.results['vmimages'] = self.list_offers() elif self.location: self.results['vmimages'] = self.list_publishers() return self.results def get_item(self): item = None result = [] try: item = self.compute_client.virtual_machine_images.get(self.location, self.publisher, self.offer, self.sku, self.version) except CloudError: pass if item: result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)] return result def list_images(self): response = None results = [] try: response = self.compute_client.virtual_machine_images.list(self.location, self.publisher, self.offer, self.sku,) except CloudError: pass except Exception as exc: self.fail("Failed to list images: {0}".format(str(exc))) if response: for item in response: results.append(self.serialize_obj(item, 'VirtualMachineImageResource', enum_modules=AZURE_ENUM_MODULES)) return results def list_offers(self): response = None results = [] try: response = self.compute_client.virtual_machine_images.list_offers(self.location, self.publisher) except CloudError: pass except Exception as exc: self.fail("Failed to list offers: {0}".format(str(exc))) if response: for item in response: 
results.append(self.serialize_obj(item, 'VirtualMachineImageResource', enum_modules=AZURE_ENUM_MODULES)) return results def list_publishers(self): response = None results = [] try: response = self.compute_client.virtual_machine_images.list_publishers(self.location) except CloudError: pass except Exception as exc: self.fail("Failed to list publishers: {0}".format(str(exc))) if response: for item in response: results.append(self.serialize_obj(item, 'VirtualMachineImageResource', enum_modules=AZURE_ENUM_MODULES)) return results def main(): AzureRMVirtualMachineImageInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Sertac Ozercan <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_virtualmachinescaleset_info version_added: "2.9" short_description: Get Virtual Machine Scale Set facts description: - Get facts for a virtual machine scale set. - Note that this module was called M(azure_rm_virtualmachine_scaleset_facts) before Ansible 2.8. The usage did not change. options: name: description: - Limit results to a specific virtual machine scale set. resource_group: description: - The resource group to search for the desired virtual machine scale set. tags: description: - List of tags to be matched. format: description: - Format of the data returned. - If C(raw) is selected information will be returned in raw format from Azure Python SDK. - If C(curated) is selected the structure will be identical to input parameters of M(azure_rm_virtualmachinescaleset) module. - In Ansible 2.5 and lower facts are always returned in raw format. - Please note that this option will be deprecated in 2.10 when curated format will become the only supported format. 
default: 'raw' choices: - 'curated' - 'raw' version_added: "2.6" extends_documentation_fragment: - azure author: - Sertac Ozercan (@sozercan) ''' EXAMPLES = ''' - name: Get facts for a virtual machine scale set azure_rm_virtualmachinescaleset_info: resource_group: myResourceGroup name: testvmss001 format: curated - name: Get facts for all virtual networks azure_rm_virtualmachinescaleset_info: resource_group: myResourceGroup - name: Get facts by tags azure_rm_virtualmachinescaleset_info: resource_group: myResourceGroup tags: - testing ''' RETURN = ''' vmss: description: - List of virtual machine scale sets. returned: always type: complex contains: id: description: - Resource ID. returned: always type: str sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/scalesets/myscaleset admin_username: description: - Admin username used to access the host after it is created. returned: always type: str sample: adminuser capacity: description: - Capacity of VMSS. returned: always type: int sample: 2 data_disks: description: - List of attached data disks. returned: always type: complex contains: caching: description: - Type of data disk caching. returned: always type: str sample: ReadOnly disk_size_gb: description: - The initial disk size in GB for blank data disks. returned: always type: int sample: 64 lun: description: - The logical unit number for data disk. returned: always type: int sample: 0 managed_disk_type: description: - Managed data disk type. returned: always type: str sample: Standard_LRS image: description: - Image specification. returned: always type: complex contains: offer: description: - The offer of the platform image or marketplace image used to create the virtual machine. returned: always type: str sample: RHEL publisher: description: - Publisher name. returned: always type: str sample: RedHat sku: description: - SKU name. 
returned: always type: str sample: 7-RAW version: description: - Image version. returned: always type: str sample: 7.5.2018050901 load_balancer: description: - Load balancer name. returned: always type: str sample: testlb location: description: - Resource location. type: str returned: always sample: japaneast managed_disk_type: description: - Managed data disk type. type: str returned: always sample: Standard_LRS name: description: - Resource name. returned: always type: str sample: myvmss os_disk_caching: description: - Type of OS disk caching. type: str returned: always sample: ReadOnly os_type: description: - Base type of operating system. type: str returned: always sample: Linux overprovision: description: - Specifies whether the Virtual Machine Scale Set should be overprovisioned. type: bool sample: true resource_group: description: - Resource group. type: str returned: always sample: myResourceGroup ssh_password_enabled: description: - Is SSH password authentication enabled. Valid only for Linux. type: bool returned: always sample: true subnet_name: description: - Subnet name. type: str returned: always sample: testsubnet tier: description: - SKU Tier. type: str returned: always sample: Basic upgrade_policy: description: - Upgrade policy. type: str returned: always sample: Manual virtual_network_name: description: - Associated virtual network name. type: str returned: always sample: testvn vm_size: description: - Virtual machine size. type: str returned: always sample: Standard_D4 tags: description: - Tags assigned to the resource. Dictionary of string:string pairs. 
returned: always type: dict sample: { "tag1": "abc" } ''' # NOQA from ansible.module_utils.azure_rm_common import AzureRMModuleBase import re try: from msrestazure.azure_exceptions import CloudError except Exception: # handled in azure_rm_common pass AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet' AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] class AzureRMVirtualMachineScaleSetInfo(AzureRMModuleBase): """Utility class to get virtual machine scale set facts""" def __init__(self): self.module_args = dict( name=dict(type='str'), resource_group=dict(type='str'), tags=dict(type='list'), format=dict( type='str', choices=['curated', 'raw'], default='raw' ) ) self.results = dict( changed=False, ansible_facts=dict( azure_vmss=[] ) ) self.name = None self.resource_group = None self.format = None self.tags = None super(AzureRMVirtualMachineScaleSetInfo, self).__init__( derived_arg_spec=self.module_args, supports_tags=False, facts_module=True ) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_virtualmachinescaleset_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_virtualmachinescaleset_facts' module has been renamed to 'azure_rm_virtualmachinescaleset_info'", version='2.13') for key in self.module_args: setattr(self, key, kwargs[key]) if self.name and not self.resource_group: self.fail("Parameter error: resource group required when filtering by name.") if is_old_facts: if self.name: self.results['ansible_facts']['azure_vmss'] = self.get_item() else: self.results['ansible_facts']['azure_vmss'] = self.list_items() else: if self.name: self.results['vmss'] = self.get_item() else: self.results['vmss'] = self.list_items() if self.format == 'curated': for index in range(len(self.results['ansible_facts']['azure_vmss'])): vmss = self.results['ansible_facts']['azure_vmss'][index] subnet_name = None load_balancer_name = None virtual_network_name = None ssh_password_enabled = False try: subnet_id = 
(vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0] ['properties']['ipConfigurations'][0]['properties']['subnet']['id']) subnet_name = re.sub('.*subnets\\/', '', subnet_id) except Exception: self.log('Could not extract subnet name') try: backend_address_pool_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0] ['properties']['ipConfigurations'][0]['properties']['loadBalancerBackendAddressPools'][0]['id']) load_balancer_name = re.sub('\\/backendAddressPools.*', '', re.sub('.*loadBalancers\\/', '', backend_address_pool_id)) virtual_network_name = re.sub('.*virtualNetworks\\/', '', re.sub('\\/subnets.*', '', subnet_id)) except Exception: self.log('Could not extract load balancer / virtual network name') try: ssh_password_enabled = (not vmss['properties']['virtualMachineProfile']['osProfile'] ['linuxConfiguration']['disablePasswordAuthentication']) except Exception: self.log('Could not extract SSH password enabled') data_disks = vmss['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', []) for disk_index in range(len(data_disks)): old_disk = data_disks[disk_index] new_disk = { 'lun': old_disk['lun'], 'disk_size_gb': old_disk['diskSizeGB'], 'managed_disk_type': old_disk['managedDisk']['storageAccountType'], 'caching': old_disk['caching'] } data_disks[disk_index] = new_disk updated = { 'id': vmss['id'], 'resource_group': self.resource_group, 'name': vmss['name'], 'state': 'present', 'location': vmss['location'], 'vm_size': vmss['sku']['name'], 'capacity': vmss['sku']['capacity'], 'tier': vmss['sku']['tier'], 'upgrade_policy': vmss['properties']['upgradePolicy']['mode'], 'admin_username': vmss['properties']['virtualMachineProfile']['osProfile']['adminUsername'], 'admin_password': vmss['properties']['virtualMachineProfile']['osProfile'].get('adminPassword'), 'ssh_password_enabled': ssh_password_enabled, 'image': 
vmss['properties']['virtualMachineProfile']['storageProfile']['imageReference'], 'os_disk_caching': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'], 'os_type': 'Linux' if (vmss['properties']['virtualMachineProfile']['osProfile'].get('linuxConfiguration') is not None) else 'Windows', 'overprovision': vmss['properties']['overprovision'], 'managed_disk_type': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['managedDisk']['storageAccountType'], 'data_disks': data_disks, 'virtual_network_name': virtual_network_name, 'subnet_name': subnet_name, 'load_balancer': load_balancer_name, 'tags': vmss.get('tags') } self.results['ansible_facts']['azure_vmss'][index] = updated # proper result format we want to support in the future # dropping 'ansible_facts' and shorter name 'vmss' self.results['vmss'] = self.results['ansible_facts']['azure_vmss'] if not is_old_facts: self.results.pop('ansible_facts', None) return self.results def get_item(self): """Get a single virtual machine scale set""" self.log('Get properties for {0}'.format(self.name)) item = None results = [] try: item = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) except CloudError: pass if item and self.has_tags(item.tags, self.tags): results = [self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)] return results def list_items(self): """Get all virtual machine scale sets""" self.log('List all virtual machine scale sets') try: response = self.compute_client.virtual_machine_scale_sets.list(self.resource_group) except CloudError as exc: self.fail('Failed to list all items - {0}'.format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)) return results def main(): """Main module execution code path""" AzureRMVirtualMachineScaleSetInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,569
Some azure _info modules still return ansible_facts when called as _info
##### SUMMARY Some of the modules still return something in `ansible_facts`, even though the result in there is an empty list: - ~~`azure_rm_publicipaddress_info`~~ - ~~`azure_rm_networkinterface_info`~~ - `azure_rm_storageaccount_info` - `azure_rm_virtualmachineimage_info` - `azure_rm_virtualmachinescaleset_info` - `azure_rm_virtualnetwork_info` Finally, `azure_rm_resourcegroup_info` always returns `ansible_facts` with content (which is also returned as non-`ansible_facts`). When called as `_info` modules, these modules should not have anything in `ansible_facts` when returning. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ~~azure_rm_publicipaddress_info~~ ~~azure_rm_networkinterface_info~~ azure_rm_resourcegroup_info azure_rm_storageaccount_info azure_rm_virtualmachineimage_info azure_rm_virtualmachinescaleset_info azure_rm_virtualnetwork_info ##### ANSIBLE VERSION ```paste below 2.9 ```
https://github.com/ansible/ansible/issues/61569
https://github.com/ansible/ansible/pull/61805
2a932ad7cfe27663d47cbd43de124fc756632361
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
2019-08-29T19:41:13Z
python
2019-09-05T09:51:56Z
lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py
#!/usr/bin/python # # Copyright (c) 2016 Matt Davis, <[email protected]> # Chris Houseknecht, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_virtualnetwork_info version_added: "2.9" short_description: Get virtual network facts description: - Get facts for a specific virtual network or all virtual networks within a resource group. options: name: description: - Only show results for a specific security group. resource_group: description: - Limit results by resource group. Required when filtering by name. tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - Chris Houseknecht (@chouseknecht) - Matt Davis (@nitzmahone) ''' EXAMPLES = ''' - name: Get facts for one virtual network azure_rm_virtualnetwork_info: resource_group: myResourceGroup name: secgroup001 - name: Get facts for all virtual networks azure_rm_virtualnetwork_info: resource_group: myResourceGroup - name: Get facts by tags azure_rm_virtualnetwork_info: tags: - testing ''' RETURN = ''' azure_virtualnetworks: description: - List of virtual network dicts. 
returned: always type: list example: [{ "etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"', "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001", "location": "eastus2", "name": "vnet2001", "properties": { "addressSpace": { "addressPrefixes": [ "10.10.0.0/16" ] }, "provisioningState": "Succeeded", "resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612", "subnets": [] }, "type": "Microsoft.Network/virtualNetworks" }] virtualnetworks: description: - List of virtual network dicts with same format as M(azure_rm_virtualnetwork) module parameters. returned: always type: complex contains: id: description: - Resource ID of the virtual network. sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001 returned: always type: str address_prefixes: description: - List of IPv4 address ranges where each is formatted using CIDR notation. sample: ["10.10.0.0/16"] returned: always type: list dns_servers: description: - Custom list of DNS servers. returned: always type: list sample: ["www.azure.com"] location: description: - Valid Azure location. returned: always type: str sample: eastus tags: description: - Tags assigned to the resource. Dictionary of string:string pairs. returned: always type: dict sample: { "tag1": "abc" } provisioning_state: description: - Provisioning state of the resource. returned: always sample: Succeeded type: str name: description: - Name of the virtual network. returned: always type: str sample: foo subnets: description: - Subnets associated with the virtual network. returned: always type: list contains: id: description: - Resource ID of the subnet. returned: always type: str sample: "/subscriptions/f64d4ee8-be94-457d-ba26-3fa6b6506cef/resourceGroups/v-xisuRG/providers/ Microsoft.Network/virtualNetworks/vnetb57dc95232/subnets/vnetb57dc95232" name: description: - Name of the subnet. 
returned: always type: str sample: vnetb57dc95232 provisioning_state: description: - Provisioning state of the subnet. returned: always type: str sample: Succeeded address_prefix: description: - The address prefix for the subnet. returned: always type: str sample: '10.1.0.0/16' network_security_group: description: - Existing security group ID with which to associate the subnet. returned: always type: str sample: null route_table: description: - The reference of the RouteTable resource. returned: always type: str sample: null service_endpoints: description: - An array of service endpoints. returned: always type: list sample: [ { "locations": [ "southeastasia", "eastasia" ], "service": "Microsoft.Storage" } ] ''' try: from msrestazure.azure_exceptions import CloudError except Exception: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase AZURE_OBJECT_CLASS = 'VirtualNetwork' class AzureRMNetworkInterfaceInfo(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( name=dict(type='str'), resource_group=dict(type='str'), tags=dict(type='list'), ) self.results = dict( changed=False, ansible_facts=dict(azure_virtualnetworks=[]), virtualnetworks=[] ) self.name = None self.resource_group = None self.tags = None super(AzureRMNetworkInterfaceInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_virtualnetwork_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_virtualnetwork_facts' module has been renamed to 'azure_rm_virtualnetwork_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if self.name is not None: results = self.get_item() elif self.resource_group is not None: results = self.list_resource_group() else: results = self.list_items() if is_old_facts: self.results['ansible_facts']['azure_virtualnetworks'] = self.serialize(results) 
self.results['virtualnetworks'] = self.curated(results) return self.results def get_item(self): self.log('Get properties for {0}'.format(self.name)) item = None results = [] try: item = self.network_client.virtual_networks.get(self.resource_group, self.name) except CloudError: pass if item and self.has_tags(item.tags, self.tags): results = [item] return results def list_resource_group(self): self.log('List items for resource group') try: response = self.network_client.virtual_networks.list(self.resource_group) except CloudError as exc: self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(item) return results def list_items(self): self.log('List all for items') try: response = self.network_client.virtual_networks.list_all() except CloudError as exc: self.fail("Failed to list all items - {0}".format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(item) return results def serialize(self, raws): self.log("Serialize all items") return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else [] def curated(self, raws): self.log("Format all items") return [self.virtualnetwork_to_dict(x) for x in raws] if raws else [] def virtualnetwork_to_dict(self, vnet): results = dict( id=vnet.id, name=vnet.name, location=vnet.location, tags=vnet.tags, provisioning_state=vnet.provisioning_state ) if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0: results['dns_servers'] = [] for server in vnet.dhcp_options.dns_servers: results['dns_servers'].append(server) if vnet.address_space and len(vnet.address_space.address_prefixes) > 0: results['address_prefixes'] = [] for space in vnet.address_space.address_prefixes: results['address_prefixes'].append(space) if vnet.subnets and len(vnet.subnets) > 0: results['subnets'] = [self.subnet_to_dict(x) for x in vnet.subnets] return 
results def subnet_to_dict(self, subnet): result = dict( id=subnet.id, name=subnet.name, provisioning_state=subnet.provisioning_state, address_prefix=subnet.address_prefix, network_security_group=subnet.network_security_group.id if subnet.network_security_group else None, route_table=subnet.route_table.id if subnet.route_table else None ) if subnet.service_endpoints: result['service_endpoints'] = [{'service': item.service, 'locations': item.locations} for item in subnet.service_endpoints] return result def main(): AzureRMNetworkInterfaceInfo() if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,833
gitlab_hook module doesn't have parameter "trigger by comment"
<!--- Verify first that your feature was not already discussed on GitHub --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Describe the new feature/improvement briefly below --> In gitlab_hook module doesn't have parameter "Comment" which is necessary for my DevOps flow and generally very useful for triggering a job. Without this option, I and a lot of people cannot use this module fully. ##### ISSUE TYPE - Feature Idea ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> https://docs.ansible.com/ansible/latest/modules/gitlab_hook_module.html ##### ADDITIONAL INFORMATION <!--- Describe how the feature would be used, why it is needed and what it would solve --> 1. I use the newest version of ansible. 2. It's about this option in gitlab: [![comments.png](https://i.postimg.cc/C5tkJdJ7/comments.png)](https://postimg.cc/sGYMgjkG) <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
https://github.com/ansible/ansible/issues/61833
https://github.com/ansible/ansible/pull/61837
e06dbe6e4f925b473acf0d03deda3b6d780a9dd3
223dab99ea0075c96bf5bd049d2a140de5b4a285
2019-09-05T08:28:07Z
python
2019-09-05T11:39:59Z
lib/ansible/modules/source_control/gitlab_hook.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2019, Guillaume Martinez ([email protected]) # Copyright: (c) 2018, Marcus Watkins <[email protected]> # Based on code: # Copyright: (c) 2013, Phillip Gentry <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gitlab_hook short_description: Manages GitLab project hooks. description: - Adds, updates and removes project hook version_added: "2.6" author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) requirements: - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - auth_basic options: api_token: description: - GitLab token for logging in. version_added: "2.8" type: str aliases: - private_token - access_token project: description: - Id or Full path of the project in the form of group/name required: true type: str hook_url: description: - The url that you want GitLab to post to, this is used as the primary key for updates and deletion. required: true type: str state: description: - When C(present) the hook will be updated to match the input or created if it doesn't exist. When C(absent) it will be deleted if it exists. 
required: true default: present type: str choices: [ "present", "absent" ] push_events: description: - Trigger hook on push events type: bool default: yes issues_events: description: - Trigger hook on issues events type: bool default: no merge_requests_events: description: - Trigger hook on merge requests events type: bool default: no tag_push_events: description: - Trigger hook on tag push events type: bool default: no note_events: description: - Trigger hook on note events type: bool default: no job_events: description: - Trigger hook on job events type: bool default: no pipeline_events: description: - Trigger hook on pipeline events type: bool default: no wiki_page_events: description: - Trigger hook on wiki events type: bool default: no hook_validate_certs: description: - Whether GitLab will do SSL verification when triggering the hook type: bool default: no aliases: [ enable_ssl_verification ] token: description: - Secret token to validate hook messages at the receiver. - If this is present it will always result in a change as it cannot be retrieved from GitLab. 
- Will show up in the X-GitLab-Token HTTP request header required: false type: str ''' EXAMPLES = ''' - name: "Adding a project hook" gitlab_hook: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" project: "my_group/my_project" hook_url: "https://my-ci-server.example.com/gitlab-hook" state: present push_events: yes tag_push_events: yes hook_validate_certs: no token: "my-super-secret-token-that-my-ci-server-will-check" - name: "Delete the previous hook" gitlab_hook: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" project: "my_group/my_project" hook_url: "https://my-ci-server.example.com/gitlab-hook" state: absent - name: "Delete a hook by numeric project id" gitlab_hook: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" project: 10 hook_url: "https://my-ci-server.example.com/gitlab-hook" state: absent ''' RETURN = ''' msg: description: Success or failure message returned: always type: str sample: "Success" result: description: json parsed response from the server returned: always type: dict error: description: the error message returned by the GitLab API returned: failed type: str sample: "400: path is already in use" hook: description: API object returned: always type: dict ''' import os import re import traceback GITLAB_IMP_ERR = None try: import gitlab HAS_GITLAB_PACKAGE = True except Exception: GITLAB_IMP_ERR = traceback.format_exc() HAS_GITLAB_PACKAGE = False from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils._text import to_native from ansible.module_utils.gitlab import findProject class GitLabHook(object): def __init__(self, module, gitlab_instance): self._module = module self._gitlab = gitlab_instance self.hookObject = None ''' @param prokect Project Object @param hook_url Url to call on event @param description Description of the group @param parent Parent group full path ''' def 
createOrUpdateHook(self, project, hook_url, options): changed = False # Because we have already call userExists in main() if self.hookObject is None: hook = self.createHook(project, { 'url': hook_url, 'push_events': options['push_events'], 'issues_events': options['issues_events'], 'merge_requests_events': options['merge_requests_events'], 'tag_push_events': options['tag_push_events'], 'note_events': options['note_events'], 'job_events': options['job_events'], 'pipeline_events': options['pipeline_events'], 'wiki_page_events': options['wiki_page_events'], 'enable_ssl_verification': options['enable_ssl_verification'], 'token': options['token']}) changed = True else: changed, hook = self.updateHook(self.hookObject, { 'push_events': options['push_events'], 'issues_events': options['issues_events'], 'merge_requests_events': options['merge_requests_events'], 'tag_push_events': options['tag_push_events'], 'note_events': options['note_events'], 'job_events': options['job_events'], 'pipeline_events': options['pipeline_events'], 'wiki_page_events': options['wiki_page_events'], 'enable_ssl_verification': options['enable_ssl_verification'], 'token': options['token']}) self.hookObject = hook if changed: if self._module.check_mode: self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url) try: hook.save() except Exception as e: self._module.fail_json(msg="Failed to update hook: %s " % e) return True else: return False ''' @param project Project Object @param arguments Attributs of the hook ''' def createHook(self, project, arguments): if self._module.check_mode: return True hook = project.hooks.create(arguments) return hook ''' @param hook Hook Object @param arguments Attributs of the hook ''' def updateHook(self, hook, arguments): changed = False for arg_key, arg_value in arguments.items(): if arguments[arg_key] is not None: if getattr(hook, arg_key) != arguments[arg_key]: setattr(hook, arg_key, arguments[arg_key]) changed = True return 
(changed, hook) ''' @param project Project object @param hook_url Url to call on event ''' def findHook(self, project, hook_url): hooks = project.hooks.list() for hook in hooks: if (hook.url == hook_url): return hook ''' @param project Project object @param hook_url Url to call on event ''' def existsHook(self, project, hook_url): # When project exists, object will be stored in self.projectObject. hook = self.findHook(project, hook_url) if hook: self.hookObject = hook return True return False def deleteHook(self): if self._module.check_mode: return True return self.hookObject.delete() def deprecation_warning(module): deprecated_aliases = ['private_token', 'access_token'] module.deprecate("Aliases \'{aliases}\' are deprecated".format(aliases='\', \''.join(deprecated_aliases)), "2.10") def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(dict( api_token=dict(type='str', no_log=True, aliases=["private_token", "access_token"]), state=dict(type='str', default="present", choices=["absent", "present"]), project=dict(type='str', required=True), hook_url=dict(type='str', required=True), push_events=dict(type='bool', default=True), issues_events=dict(type='bool', default=False), merge_requests_events=dict(type='bool', default=False), tag_push_events=dict(type='bool', default=False), note_events=dict(type='bool', default=False), job_events=dict(type='bool', default=False), pipeline_events=dict(type='bool', default=False), wiki_page_events=dict(type='bool', default=False), hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), token=dict(type='str', no_log=True), )) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'] ], required_together=[ ['api_username', 'api_password'] ], required_one_of=[ ['api_username', 'api_token'] ], supports_check_mode=True, ) deprecation_warning(module) gitlab_url = re.sub('/api.*', '', 
module.params['api_url']) validate_certs = module.params['validate_certs'] gitlab_user = module.params['api_username'] gitlab_password = module.params['api_password'] gitlab_token = module.params['api_token'] state = module.params['state'] project_identifier = module.params['project'] hook_url = module.params['hook_url'] push_events = module.params['push_events'] issues_events = module.params['issues_events'] merge_requests_events = module.params['merge_requests_events'] tag_push_events = module.params['tag_push_events'] note_events = module.params['note_events'] job_events = module.params['job_events'] pipeline_events = module.params['pipeline_events'] wiki_page_events = module.params['wiki_page_events'] enable_ssl_verification = module.params['hook_validate_certs'] hook_token = module.params['token'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) try: gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password, private_token=gitlab_token, api_version=4) gitlab_instance.auth() except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e: module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e)) except (gitlab.exceptions.GitlabHttpError) as e: module.fail_json(msg="Failed to connect to GitLab server: %s. \ GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." 
% to_native(e)) gitlab_hook = GitLabHook(module, gitlab_instance) project = findProject(gitlab_instance, project_identifier) if project is None: module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier) hook_exists = gitlab_hook.existsHook(project, hook_url) if state == 'absent': if hook_exists: gitlab_hook.deleteHook() module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url) else: module.exit_json(changed=False, msg="Hook deleted or does not exists") if state == 'present': if gitlab_hook.createOrUpdateHook(project, hook_url, { "push_events": push_events, "issues_events": issues_events, "merge_requests_events": merge_requests_events, "tag_push_events": tag_push_events, "note_events": note_events, "job_events": job_events, "pipeline_events": pipeline_events, "wiki_page_events": wiki_page_events, "enable_ssl_verification": enable_ssl_verification, "token": hook_token}): module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs) else: module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,547
_net_logging and ios_logging dest: file
<!--- Verify first that your improvement is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below, add suggestions to wording or structure --> In net_logging documentation examples, 'file' is used as 'dest:' but not mentioned in parameters choices In ios_logging, the parameter 'name' says "If value of dest is file it indicates file-name" but again 'file' is not stated as a valid 'dest' choice. I can update the doc for net_logging as it seems file is a valid choice because it is used in examples but for ios_logging, I'm not sure if it's a typo or if 'file' should be valid too. <!--- HINT: Did you know the documentation has an "Edit on GitHub" link on every page ? --> ##### ISSUE TYPE - Documentation Report ##### COMPONENT NAME <!--- Write the short name of the rst file, module, plugin, task or feature below, use your best guess if unsure --> net_logging, ios_logging ##### ADDITIONAL INFORMATION <!--- Describe how this improves the documentation, e.g. before/after situation or screenshots --> <!--- HINT: You can paste gist.github.com links for larger files -->
https://github.com/ansible/ansible/issues/61547
https://github.com/ansible/ansible/pull/61777
f406b8b4c4046f31330347e4456147cb85e44a27
cc9adf7f1ae819db13c77ee99da824be8b762ec5
2019-08-29T13:14:31Z
python
2019-09-05T12:58:57Z
lib/ansible/modules/network/eos/eos_logging.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: eos_logging version_added: "2.4" author: "Trishna Guha (@trishnaguha)" short_description: Manage logging on network devices description: - This module provides declarative management of logging on Arista Eos devices. notes: - Tested against EOS 4.15 options: dest: description: - Destination of the logs. choices: ['on', 'host', console', 'monitor', 'buffered'] name: description: - If value of C(dest) is I(host) C(name) should be specified, which indicates hostname or IP address. size: description: - Size of buffer. The acceptable value is in range from 10 to 2147483647 bytes. facility: description: - Set logging facility. level: description: - Set logging severity levels. choices: ['emergencies', 'alerts', 'critical', 'errors', 'warnings', 'notifications', 'informational', 'debugging'] aggregate: description: List of logging definitions. state: description: - State of the logging configuration. 
    default: present
    choices: ['present', 'absent']
extends_documentation_fragment: eos
"""

EXAMPLES = """
- name: configure host logging
  eos_logging:
    dest: host
    name: 172.16.0.1
    state: present

- name: remove host logging configuration
  eos_logging:
    dest: host
    name: 172.16.0.1
    state: absent

- name: configure console logging level and facility
  eos_logging:
    dest: console
    facility: local7
    level: debugging
    state: present

- name: enable logging to all
  eos_logging:
    dest : on

- name: configure buffer size
  eos_logging:
    dest: buffered
    size: 5000

- name: Configure logging using aggregate
  eos_logging:
    aggregate:
      - { dest: console, level: warnings }
      - { dest: buffered, size: 480000 }
    state: present
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device
  returned: always
  type: list
  sample:
    - logging facility local7
    - logging host 172.16.0.1
"""

import re

from copy import deepcopy

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import get_config, load_config
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args

# Logging destinations and severity levels that EOS accepts; used both for
# argument validation and for parsing running-config lines.
DEST_GROUP = ['on', 'host', 'console', 'monitor', 'buffered']
LEVEL_GROUP = ['emergencies', 'alerts', 'critical', 'errors',
               'warnings', 'notifications', 'informational',
               'debugging']


def validate_size(value, module):
    """Validate a buffered-logging size; fail the module if out of range.

    Returns *value* unchanged when it is within the EOS-accepted range
    10..2147483647, and returns None when *value* is falsy.
    """
    if value:
        if not int(10) <= value <= int(2147483647):
            module.fail_json(msg='size must be between 10 and 2147483647')
        else:
            return value


def map_obj_to_commands(updates, module):
    """Diff desired state (*want*) against device state (*have*).

    :param updates: tuple of (want, have) lists of logging dicts
    :param module: AnsibleModule, used only to fail on a bad ``dest``
    :returns: list of EOS CLI commands that realize the desired state
    """
    commands = list()
    want, have = updates

    for w in want:
        dest = w['dest']
        name = w['name']
        size = w['size']
        facility = w['facility']
        level = w['level']
        state = w['state']
        # 'state' is not part of the device representation, so drop it
        # before comparing w against the entries in 'have'.
        del w['state']

        if state == 'absent' and w in have:
            if dest:
                if dest == 'host':
                    commands.append('no logging host {0}'.format(name))
                elif dest in DEST_GROUP:
                    commands.append('no logging {0}'.format(dest))
                else:
                    module.fail_json(msg='dest must be among console, monitor, buffered, host, on')

            if facility:
                commands.append('no logging facility {0}'.format(facility))

        if state == 'present' and w not in have:
            if facility:
                present = False

                # Iterate over every dictionary in the 'have' list to check if
                # similar configuration for facility exists or not
                for entry in have:
                    if not entry['dest'] and entry['facility'] == facility:
                        present = True

                if not present:
                    commands.append('logging facility {0}'.format(facility))

            if dest == 'host':
                commands.append('logging host {0}'.format(name))

            elif dest == 'on':
                commands.append('logging on')

            elif dest == 'buffered' and size:
                present = False

                # Deals with the following two cases:
                # Case 1:       logging buffered <size> <level>
                #               logging buffered <same-size>
                #
                # Case 2:       Same buffered logging configuration
                #               already exists (i.e., both size &
                #               level are same)
                for entry in have:
                    if entry['dest'] == 'buffered' and entry['size'] == size:
                        if not level or entry['level'] == level:
                            present = True

                if not present:
                    if size and level:
                        commands.append('logging buffered {0} {1}'.format(size, level))
                    else:
                        commands.append('logging buffered {0}'.format(size))

            else:
                # console / monitor / any other plain destination; the
                # severity level is optional.
                dest_cmd = 'logging {0}'.format(dest)

                if level:
                    dest_cmd += ' {0}'.format(level)

                commands.append(dest_cmd)

    return commands


def parse_facility(line):
    """Extract the facility name from a ``logging facility`` config line."""
    facility = None
    match = re.search(r'logging facility (\S+)', line, re.M)
    if match:
        facility = match.group(1)

    return facility


def parse_size(line, dest):
    """Extract the buffer size (as a string) from a ``logging buffered`` line.

    Returns None for destinations other than 'buffered' or when no size
    can be parsed.
    """
    size = None

    if dest == 'buffered':
        match = re.search(r'logging buffered (\S+)', line, re.M)
        if match:
            try:
                int_size = int(match.group(1))
            except ValueError:
                int_size = None

            if int_size:
                if isinstance(int_size, int):
                    size = str(match.group(1))
                else:
                    # NOTE(review): unreachable in practice — int_size is
                    # always an int when truthy; kept for parity with the
                    # original control flow.
                    size = str(10)
    return size


def parse_name(line, dest):
    """Extract the host name/address from a ``logging host`` config line."""
    name = None
    if dest == 'host':
        match = re.search(r'logging host (\S+)', line, re.M)
        if match:
            name = match.group(1)

    return name


def parse_level(line, dest):
    """Extract the severity level for *dest* from a running-config line.

    Host destinations carry no level; buffered lines embed the level after
    the size.  Returns None when no recognised level is present.
    """
    level = None

    if dest != 'host':

        # Line for buffer logging entry in running-config is of the form:
        # logging buffered <size> <level>
        if dest == 'buffered':
            match = re.search(r'logging buffered (?:\d+) (\S+)', line, re.M)
        else:
            match = re.search(r'logging {0} (\S+)'.format(dest), line, re.M)

        if match:
            if match.group(1) in LEVEL_GROUP:
                level = match.group(1)

    return level


def map_config_to_obj(module):
    """Parse the device's logging section into a list of logging dicts."""
    obj = []
    data = get_config(module, flags=['section logging'])

    for line in data.split('\n'):
        match = re.search(r'logging (\S+)', line, re.M)

        if match:
            if match.group(1) in DEST_GROUP:
                dest = match.group(1)
            else:
                # Unrecognised destination (e.g. a facility-only line);
                # record it with dest=None so facility/level still match.
                dest = None

            obj.append({'dest': dest,
                        'name': parse_name(line, dest),
                        'size': parse_size(line, dest),
                        'facility': parse_facility(line),
                        'level': parse_level(line, dest)})

    return obj


def parse_obj(obj, module):
    """Append a single logging dict built from module params to *obj*.

    The size is validated (and stringified) only when it was supplied.
    """
    if module.params['size'] is None:
        obj.append({
            'dest': module.params['dest'],
            'name': module.params['name'],
            'size': module.params['size'],
            'facility': module.params['facility'],
            'level': module.params['level'],
            'state': module.params['state']
        })

    else:
        obj.append({
            'dest': module.params['dest'],
            'name': module.params['name'],
            'size': str(validate_size(module.params['size'], module)),
            'facility': module.params['facility'],
            'level': module.params['level'],
            'state': module.params['state']
        })

    return obj


def map_params_to_obj(module, required_if=None):
    """Normalise module params (or each aggregate item) into logging dicts.

    Missing aggregate keys are back-filled from the top-level params, the
    ``required_if`` constraints are re-checked per item, and 'name'/'size'
    are forced to sensible values for the chosen destination.
    """
    obj = []

    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]

            module._check_required_if(required_if, item)
            d = item.copy()

            # 'name' only makes sense for host destinations.
            if d['dest'] != 'host':
                d['name'] = None

            if d['dest'] == 'buffered':
                if 'size' in d:
                    d['size'] = str(validate_size(d['size'], module))
                elif 'size' not in d:
                    # Default buffer size when none was given.
                    d['size'] = str(10)
                else:
                    pass

            if d['dest'] != 'buffered':
                d['size'] = None

            obj.append(d)
    else:
        if module.params['dest'] != 'host':
            module.params['name'] = None

        if module.params['dest'] == 'buffered':
            if not module.params['size']:
                module.params['size'] = str(10)
        else:
            module.params['size'] = None

        parse_obj(obj, module)

    return obj


def main():
    """ main entry point for module execution
    """
    element_spec = dict(
        dest=dict(choices=DEST_GROUP),
        name=dict(),
        size=dict(type='int'),
        facility=dict(),
        level=dict(choices=LEVEL_GROUP),
        state=dict(default='present', choices=['present', 'absent']),
    )

    aggregate_spec = deepcopy(element_spec)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )
    argument_spec.update(element_spec)
    argument_spec.update(eos_argument_spec)

    required_if = [('dest', 'host', ['name'])]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    have = map_config_to_obj(module)
    want = map_params_to_obj(module, required_if=required_if)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}

        result['session_name'] = response.get('session')
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,547
_net_logging and ios_logging dest: file
<!--- Verify first that your improvement is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below, add suggestions to wording or structure --> In net_logging documentation examples, 'file' is used as 'dest:' but not mentioned in parameters choices In ios_logging, the parameter 'name' says "If value of dest is file it indicates file-name" but again 'file' is not stated as a valid 'dest' choice. I can update the doc for net_logging as it seems file is a valid choice because it is used in examples but for ios_logging, I'm not sure if it's a typo or if 'file' should be valid too. <!--- HINT: Did you know the documentation has an "Edit on GitHub" link on every page ? --> ##### ISSUE TYPE - Documentation Report ##### COMPONENT NAME <!--- Write the short name of the rst file, module, plugin, task or feature below, use your best guess if unsure --> net_logging, ios_logging ##### ADDITIONAL INFORMATION <!--- Describe how this improves the documentation, e.g. before/after situation or screenshots --> <!--- HINT: You can paste gist.github.com links for larger files -->
https://github.com/ansible/ansible/issues/61547
https://github.com/ansible/ansible/pull/61777
f406b8b4c4046f31330347e4456147cb85e44a27
cc9adf7f1ae819db13c77ee99da824be8b762ec5
2019-08-29T13:14:31Z
python
2019-09-05T12:58:57Z
lib/ansible/modules/network/ios/ios_logging.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}


DOCUMENTATION = """
---
module: ios_logging
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage logging on network devices
description:
  - This module provides declarative management of logging
    on Cisco Ios devices.
notes:
  - Tested against IOS 15.6
options:
  dest:
    description:
      - Destination of the logs.
    choices: ['on', 'host', 'console', 'monitor', 'buffered', 'trap']
  name:
    description:
      - The hostname or IP address of the destination.
      - Required when I(dest=host).
  size:
    description:
      - Size of buffer. The acceptable value is in range from 4096 to
        4294967295 bytes.
    default: 4096
  facility:
    description:
      - Set logging facility.
  level:
    description:
      - Set logging severity levels.
    default: debugging
    choices: ['emergencies', 'alerts', 'critical', 'errors', 'warnings',
              'notifications', 'informational', 'debugging']
  aggregate:
    description: List of logging definitions.
  state:
    description:
      - State of the logging configuration.
    default: present
    choices: ['present', 'absent']
extends_documentation_fragment: ios
"""

EXAMPLES = """
- name: configure host logging
  ios_logging:
    dest: host
    name: 172.16.0.1
    state: present

- name: remove host logging configuration
  ios_logging:
    dest: host
    name: 172.16.0.1
    state: absent

- name: configure console logging level and facility
  ios_logging:
    dest: console
    facility: local7
    level: debugging
    state: present

- name: enable logging to all
  ios_logging:
    dest : on

- name: configure buffer size
  ios_logging:
    dest: buffered
    size: 5000

- name: Configure logging using aggregate
  ios_logging:
    aggregate:
      - { dest: console, level: notifications }
      - { dest: buffered, size: 9000 }

- name: remove logging using aggregate
  ios_logging:
    aggregate:
      - { dest: console, level: notifications }
      - { dest: buffered, size: 9000 }
    state: absent
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device
  returned: always
  type: list
  sample:
    - logging facility local7
    - logging host 172.16.0.1
"""

import re

from copy import deepcopy

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec, validate_ip_address
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import get_capabilities
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args


def validate_size(value, module):
    """Validate a buffered-logging size; fail the module if out of range.

    Returns *value* unchanged when it is within 4096..4294967295, and
    returns None when *value* is falsy.
    """
    if value:
        if not int(4096) <= int(value) <= int(4294967295):
            module.fail_json(msg='size must be between 4096 and 4294967295')
        else:
            return value


def map_obj_to_commands(updates, module, os_version):
    """Diff desired state (*want*) against device state (*have*).

    :param updates: tuple of (want, have) lists of logging dicts
    :param module: AnsibleModule, used only to fail on a bad ``dest``
    :param os_version: IOS version string; 12.x images configure hosts
        with ``logging <host>`` instead of ``logging host <host>``
    :returns: list of IOS CLI commands that realize the desired state
    """
    dest_group = ('console', 'monitor', 'buffered', 'on', 'trap')
    commands = list()
    want, have = updates

    for w in want:
        dest = w['dest']
        name = w['name']
        size = w['size']
        facility = w['facility']
        level = w['level']
        state = w['state']
        # 'state' is not part of the device representation, so drop it
        # before comparing w against the entries in 'have'.
        del w['state']

        # map_config_to_obj() records facility-only lines with
        # dest='facility'; mirror that so the membership test matches.
        if facility:
            w['dest'] = 'facility'

        if state == 'absent' and w in have:
            if dest:
                if dest == 'host':
                    if '12.' in os_version:
                        commands.append('no logging {0}'.format(name))
                    else:
                        commands.append('no logging host {0}'.format(name))

                elif dest in dest_group:
                    commands.append('no logging {0}'.format(dest))

                else:
                    module.fail_json(msg='dest must be among console, monitor, buffered, host, on, trap')

            if facility:
                commands.append('no logging facility {0}'.format(facility))

        if state == 'present' and w not in have:
            if facility:
                present = False

                # Only add the facility command when no equivalent
                # facility configuration already exists on the device.
                for entry in have:
                    if entry['dest'] == 'facility' and entry['facility'] == facility:
                        present = True

                if not present:
                    commands.append('logging facility {0}'.format(facility))

            if dest == 'host':
                if '12.' in os_version:
                    commands.append('logging {0}'.format(name))
                else:
                    commands.append('logging host {0}'.format(name))

            elif dest == 'on':
                commands.append('logging on')

            elif dest == 'buffered' and size:
                present = False

                # Skip the command when an identical buffered
                # configuration (same size and level) already exists.
                for entry in have:
                    if entry['dest'] == 'buffered' and entry['size'] == size and entry['level'] == level:
                        present = True

                if not present:
                    # 'debugging' is the device default level, so it is
                    # omitted from the generated command.
                    if level and level != 'debugging':
                        commands.append('logging buffered {0} {1}'.format(size, level))
                    else:
                        commands.append('logging buffered {0}'.format(size))

            else:
                if dest:
                    dest_cmd = 'logging {0}'.format(dest)

                    if level:
                        dest_cmd += ' {0}'.format(level)

                    commands.append(dest_cmd)

    return commands


def parse_facility(line, dest):
    """Extract the facility name from a ``logging facility`` config line."""
    facility = None
    if dest == 'facility':
        match = re.search(r'logging facility (\S+)', line, re.M)
        if match:
            facility = match.group(1)

    return facility


def parse_size(line, dest):
    """Extract the buffer size (as a string) from a ``logging buffered`` line.

    Falls back to the device default "4096" when the line carries no
    explicit size; returns None for non-buffered destinations.
    """
    size = None

    if dest == 'buffered':
        match = re.search(r'logging buffered(?: (\d+))?(?: [a-z]+)?', line, re.M)
        if match:
            if match.group(1) is not None:
                size = match.group(1)
            else:
                size = "4096"

    return size


def parse_name(line, dest):
    """Extract the host name/address from a ``logging host`` config line.

    Returns None for non-host destinations, and also when the line does
    not actually contain a parsable host (previously this case raised
    UnboundLocalError because ``name`` was never assigned).
    """
    name = None
    if dest == 'host':
        match = re.search(r'logging host (\S+)', line, re.M)
        if match:
            name = match.group(1)

    return name


def parse_level(line, dest):
    """Extract the severity level for *dest* from a running-config line.

    Host destinations always report 'debugging'; any line without a
    recognised level also defaults to 'debugging'.
    """
    level_group = ('emergencies', 'alerts', 'critical', 'errors', 'warnings',
                   'notifications', 'informational', 'debugging')

    if dest == 'host':
        level = 'debugging'

    else:
        # Line for buffered logging in the running-config is of the form:
        # logging buffered <size> <level>
        if dest == 'buffered':
            match = re.search(r'logging buffered(?: \d+)?(?: ([a-z]+))?', line, re.M)
        else:
            match = re.search(r'logging {0} (\S+)'.format(dest), line, re.M)

        if match and match.group(1) in level_group:
            level = match.group(1)
        else:
            level = 'debugging'

    return level


def map_config_to_obj(module):
    """Parse the device's logging configuration into a list of logging dicts.

    Host entries may appear either as ``logging host <ip>`` or (on 12.x
    images) as ``logging <ip>``, so bare IP-looking tokens are treated as
    host destinations too.
    """
    obj = []
    dest_group = ('console', 'host', 'monitor', 'buffered', 'on', 'facility', 'trap')

    data = get_config(module, flags=['| include logging'])

    for line in data.split('\n'):
        match = re.search(r'^logging (\S+)', line, re.M)

        if match:
            if match.group(1) in dest_group:
                dest = match.group(1)

                obj.append({
                    'dest': dest,
                    'name': parse_name(line, dest),
                    'size': parse_size(line, dest),
                    'facility': parse_facility(line, dest),
                    'level': parse_level(line, dest)
                })

            elif validate_ip_address(match.group(1)):
                dest = 'host'

                obj.append({
                    'dest': dest,
                    'name': match.group(1),
                    'size': parse_size(line, dest),
                    'facility': parse_facility(line, dest),
                    'level': parse_level(line, dest)
                })

            else:
                # Token that merely contains an IPv4-looking substring
                # (e.g. a hostname with embedded dotted quad).
                ip_match = re.search(r'\d+\.\d+\.\d+\.\d+', match.group(1), re.M)
                if ip_match:
                    dest = 'host'

                    obj.append({
                        'dest': dest,
                        'name': match.group(1),
                        'size': parse_size(line, dest),
                        'facility': parse_facility(line, dest),
                        'level': parse_level(line, dest)
                    })

    return obj


def map_params_to_obj(module, required_if=None):
    """Normalise module params (or each aggregate item) into logging dicts.

    Missing aggregate keys are back-filled from the top-level params, the
    ``required_if`` constraints are re-checked per item, and 'name'/'size'
    are forced to sensible values for the chosen destination.
    """
    obj = []

    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]

            module._check_required_if(required_if, item)
            d = item.copy()

            # 'name' only makes sense for host destinations.
            if d['dest'] != 'host':
                d['name'] = None

            if d['dest'] == 'buffered':
                if 'size' in d:
                    d['size'] = str(validate_size(d['size'], module))
                elif 'size' not in d:
                    # Device default buffer size.
                    d['size'] = str(4096)
                else:
                    pass

            if d['dest'] != 'buffered':
                d['size'] = None

            obj.append(d)

    else:
        if module.params['dest'] != 'host':
            module.params['name'] = None

        if module.params['dest'] == 'buffered':
            if not module.params['size']:
                module.params['size'] = str(4096)
        else:
            module.params['size'] = None

        if module.params['size'] is None:
            obj.append({
                'dest': module.params['dest'],
                'name': module.params['name'],
                'size': module.params['size'],
                'facility': module.params['facility'],
                'level': module.params['level'],
                'state': module.params['state']
            })

        else:
            obj.append({
                'dest': module.params['dest'],
                'name': module.params['name'],
                'size': str(validate_size(module.params['size'], module)),
                'facility': module.params['facility'],
                'level': module.params['level'],
                'state': module.params['state']
            })

    return obj


def main():
    """ main entry point for module execution
    """
    element_spec = dict(
        dest=dict(type='str', choices=['on', 'host', 'console', 'monitor', 'buffered', 'trap']),
        name=dict(type='str'),
        size=dict(type='int'),
        facility=dict(type='str'),
        level=dict(type='str', default='debugging',
                   choices=['emergencies', 'alerts', 'critical', 'errors',
                            'warnings', 'notifications', 'informational',
                            'debugging']),
        state=dict(default='present', choices=['present', 'absent']),
    )

    aggregate_spec = deepcopy(element_spec)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)

    required_if = [('dest', 'host', ['name'])]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    device_info = get_capabilities(module)
    os_version = device_info['device_info']['network_os_version']

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    want = map_params_to_obj(module, required_if=required_if)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands((want, have), module, os_version)
    result['commands'] = commands

    if commands:
        if not module.check_mode:
            load_config(module, commands)

        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,553
Dead docs link to galaxy settings
##### SUMMARY https://github.com/ansible/ansible/blob/6ad40fd6b8b6319f9f3fff7bc6f854e951a82396/docs/docsite/rst/reference_appendices/galaxy.rst#L30-L31 in the text > The command line tool by default communicates with the Galaxy website API using the server address https://galaxy.ansible.com. Since the Galaxy project is an open source project, you may be running your own internal Galaxy server and wish to override the default server address. You can do this using the --server option or by setting the Galaxy server value in your ansible.cfg file. For information on setting the value in ansible.cfg visit Galaxy Settings. The word "Galaxy Settings" links to `/docs/docsite/rst/reference_appendices/intro_configuration.html#galaxy-settings`, which does not exist. ~Perhaps it should link to https://github.com/ansible/ansible/blob/6ad40fd6b8b6319f9f3fff7bc6f854e951a82396/docs/docsite/rst/reference_appendices/general_precedence.rst?~ I don't know where it should link to. ##### ISSUE TYPE - Documentation Report ##### COMPONENT NAME docs/docsite/rst/reference_appendices/galaxy.rst ##### ANSIBLE VERSION current ##### CONFIGURATION N/A ##### OS / ENVIRONMENT N/A ##### ADDITIONAL INFORMATION https://docs.ansible.com/ansible/latest/reference_appendices/galaxy.html Ping @jborean93
https://github.com/ansible/ansible/issues/61553
https://github.com/ansible/ansible/pull/61823
173d47d1f429847ca351da92b69a0e05d25bb313
2fbe4ca102324661eb0c04de3f8fc7e96e472bbb
2019-08-29T14:06:57Z
python
2019-09-05T14:47:19Z
docs/docsite/rst/reference_appendices/galaxy.rst
.. _ansible_galaxy: Ansible Galaxy ++++++++++++++ *Ansible Galaxy* refers to the `Galaxy <https://galaxy.ansible.com>`_ website where users can share roles, and to a command line tool for installing, creating and managing roles. .. contents:: Topics The Website ``````````` `Galaxy <https://galaxy.ansible.com>`_, is a free site for finding, downloading, and sharing community developed roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. You can also use the site to share roles that you create. By authenticating with the site using your GitHub account, you're able to *import* roles, making them available to the Ansible community. Imported roles become available in the Galaxy search index and visible on the site, allowing users to discover and download them. Learn more by viewing `the About page <https://galaxy.ansible.com/docs/>`_. The command line tool ````````````````````` The ``ansible-galaxy`` command comes bundled with Ansible, and you can use it to install roles from Galaxy or directly from a git based SCM. You can also use it to create a new role, remove roles, or perform tasks on the Galaxy website. The command line tool by default communicates with the Galaxy website API using the server address *https://galaxy.ansible.com*. Since the `Galaxy project <https://github.com/ansible/galaxy>`_ is an open source project, you may be running your own internal Galaxy server and wish to override the default server address. You can do this using the *--server* option or by setting the Galaxy server value in your *ansible.cfg* file. For information on setting the value in *ansible.cfg* visit `Galaxy Settings <./intro_configuration.html#galaxy-settings>`_. 
Installing Roles ---------------- Use the ``ansible-galaxy`` command to download roles from the `Galaxy website <https://galaxy.ansible.com>`_ :: $ ansible-galaxy install username.role_name roles_path ========== By default Ansible downloads roles to the first writable directory in the default list of paths ``~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles``. This will install roles in the home directory of the user running ``ansible-galaxy``. You can override this by setting the environment variable :envvar:`ANSIBLE_ROLES_PATH` in your session, defining ``roles_path`` in an ``ansible.cfg`` file, or by using the ``--roles-path`` option. The following provides an example of using ``--roles-path`` to install the role into the current working directory: :: $ ansible-galaxy install --roles-path . geerlingguy.apache .. seealso:: :ref:`intro_configuration` All about configuration files version ======= You can install a specific version of a role from Galaxy by appending a comma and the value of a GitHub release tag. For example: :: $ ansible-galaxy install geerlingguy.apache,v1.0.0 It's also possible to point directly to the git repository and specify a branch name or commit hash as the version. For example, the following will install a specific commit: :: $ ansible-galaxy install git+https://github.com/geerlingguy/ansible-role-apache.git,0b7cd353c0250e87a26e0499e59e7fd265cc2f25 Installing multiple roles from a file ===================================== Beginning with Ansible 1.8 it is possible to install multiple roles by including the roles in a *requirements.yml* file. The format of the file is YAML, and the file extension must be either *.yml* or *.yaml*. Use the following command to install roles included in *requirements.yml*: :: $ ansible-galaxy install -r requirements.yml Again, the extension is important. If the *.yml* extension is left off, the ``ansible-galaxy`` CLI assumes the file is in an older, now deprecated, "basic" format. 
Each role in the file will have one or more of the following attributes: src The source of the role. Use the format *username.role_name*, if downloading from Galaxy; otherwise, provide a URL pointing to a repository within a git based SCM. See the examples below. This is a required attribute. scm Specify the SCM. As of this writing only *git* or *hg* are allowed. See the examples below. Defaults to *git*. version: The version of the role to download. Provide a release tag value, commit hash, or branch name. Defaults to the branch set as a default in the repository, otherwise defaults to the *master*. name: Download the role to a specific name. Defaults to the Galaxy name when downloading from Galaxy, otherwise it defaults to the name of the repository. Use the following example as a guide for specifying roles in *requirements.yml*: :: # from galaxy - src: yatesr.timezone # from GitHub - src: https://github.com/bennojoy/nginx # from GitHub, overriding the name and specifying a specific tag - src: https://github.com/bennojoy/nginx version: master name: nginx_role # from a webserver, where the role is packaged in a tar.gz - src: https://some.webserver.example.com/files/master.tar.gz name: http-role-gz # from a webserver, where the role is packaged in a tar.bz2 - src: https://some.webserver.example.com/files/master.tar.bz2 name: http-role-bz2 # from a webserver, where the role is packaged in a tar.xz (Python 3.x only) - src: https://some.webserver.example.com/files/master.tar.xz name: http-role-xz # from Bitbucket - src: git+https://bitbucket.org/willthames/git-ansible-galaxy version: v1.4 # from Bitbucket, alternative syntax and caveats - src: https://bitbucket.org/willthames/hg-ansible-galaxy scm: hg # from GitLab or other git-based scm, using git+ssh - src: [email protected]:mygroup/ansible-base.git scm: git version: "0.1" # quoted, so YAML doesn't parse this as a floating-point value Installing multiple roles from multiple files 
============================================= At a basic level, including requirements files allows you to break up bits of roles into smaller files. Role includes pull in roles from other files. Use the following command to install roles includes in *requirements.yml* + *webserver.yml* :: ansible-galaxy install -r requirements.yml Content of the *requirements.yml* file: :: # from galaxy - src: yatesr.timezone - include: <path_to_requirements>/webserver.yml Content of the *webserver.yml* file: :: # from github - src: https://github.com/bennojoy/nginx # from Bitbucket - src: git+https://bitbucket.org/willthames/git-ansible-galaxy version: v1.4 .. _galaxy_dependencies: Dependencies ============ Roles can also be dependent on other roles, and when you install a role that has dependencies, those dependencies will automatically be installed. You specify role dependencies in the ``meta/main.yml`` file by providing a list of roles. If the source of a role is Galaxy, you can simply specify the role in the format ``username.role_name``. You can also use the more complex format in ``requirements.yml``, allowing you to provide ``src``, ``scm``, ``version``, and ``name``. Tags are inherited *down* the dependency chain. In order for tags to be applied to a role and all its dependencies, the tag should be applied to the role, not to all the tasks within a role. Roles listed as dependencies are subject to conditionals and tag filtering, and may not execute fully depending on what tags and conditionals are applied. Dependencies found in Galaxy can be specified as follows: :: dependencies: - geerlingguy.apache - geerlingguy.ansible The complex form can also be used as follows: :: dependencies: - src: geerlingguy.ansible - src: git+https://github.com/geerlingguy/ansible-role-composer.git version: 775396299f2da1f519f0d8885022ca2d6ee80ee8 name: composer When dependencies are encountered by ``ansible-galaxy``, it will automatically install each dependency to the ``roles_path``. 
To understand how dependencies are handled during play execution, see :ref:`playbooks_reuse_roles`. .. note:: At the time of this writing, the Galaxy website expects all role dependencies to exist in Galaxy, and therefore dependencies to be specified in the ``username.role_name`` format. If you import a role with a dependency where the ``src`` value is a URL, the import process will fail. Create roles ------------ Use the ``init`` command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires :: $ ansible-galaxy init role_name The above will create the following directory structure in the current working directory: :: role_name/ README.md .travis.yml defaults/ main.yml files/ handlers/ main.yml meta/ main.yml templates/ tests/ inventory test.yml vars/ main.yml If you want to create a repository for the role the repository root should be `role_name`. Force ===== If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error use the *--force* option. Force will create the above subdirectories and files, replacing anything that matches. Container Enabled ================= If you are creating a Container Enabled role, pass ``--type container`` to ``ansible-galaxy init``. This will create the same directory structure as above, but populate it with default files appropriate for a Container Enabled role. For instance, the README.md has a slightly different structure, the *.travis.yml* file tests the role using `Ansible Container <https://github.com/ansible/ansible-container>`_, and the meta directory includes a *container.yml* file. 
Using a Custom Role Skeleton ============================ A custom role skeleton directory can be supplied as follows: :: $ ansible-galaxy init --role-skeleton=/path/to/skeleton role_name When a skeleton is provided, init will: - copy all files and directories from the skeleton to the new role - any .j2 files found outside of a templates folder will be rendered as templates. The only useful variable at the moment is role_name - The .git folder and any .git_keep files will not be copied Alternatively, the role_skeleton and ignoring of files can be configured via ansible.cfg :: [galaxy] role_skeleton = /path/to/skeleton role_skeleton_ignore = ^.git$,^.*/.git_keep$ Search for Roles ---------------- Search the Galaxy database by tags, platforms, author and multiple keywords. For example: :: $ ansible-galaxy search elasticsearch --author geerlingguy The search command will return a list of the first 1000 results matching your search: :: Found 2 roles matching your search: Name Description ---- ----------- geerlingguy.elasticsearch Elasticsearch for Linux. geerlingguy.elasticsearch-curator Elasticsearch curator for Linux. Get more information about a role --------------------------------- Use the ``info`` command to view more detail about a specific role: :: $ ansible-galaxy info username.role_name This returns everything found in Galaxy for the role: :: Role: username.role_name description: Installs and configures a thing, a distributed, highly available NoSQL thing. active: True commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57 commit_message: Adding travis commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab company: My Company, Inc. 
created: 2015-12-08T14:17:52.773Z download_count: 1 forks_count: 0 github_branch: github_repo: repo_name github_user: username id: 6381 is_valid: True issue_tracker_url: license: Apache min_ansible_version: 1.4 modified: 2015-12-08T18:43:49.085Z namespace: username open_issues_count: 0 path: /Users/username/projects/roles scm: None src: username.repo_name stargazers_count: 0 travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master version: watchers_count: 1 List installed roles -------------------- Use ``list`` to show the name and version of each role installed in the *roles_path*. :: $ ansible-galaxy list - chouseknecht.role-install_mongod, master - chouseknecht.test-role-1, v1.0.2 - chrismeyersfsu.role-iptables, master - chrismeyersfsu.role-required_vars, master Remove an installed role ------------------------ Use ``remove`` to delete a role from *roles_path*: :: $ ansible-galaxy remove username.role_name Authenticate with Galaxy ------------------------ Using the ``import``, ``delete`` and ``setup`` commands to manage your roles on the Galaxy website requires authentication, and the ``login`` command can be used to do just that. Before you can use the ``login`` command, you must create an account on the Galaxy website. The ``login`` command requires using your GitHub credentials. You can use your username and password, or you can create a `personal access token <https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_. If you choose to create a token, grant minimal access to the token, as it is used just to verify identify. The following shows authenticating with the Galaxy website using a GitHub username and password: :: $ ansible-galaxy login We need your GitHub login to identify you. This information will not be sent to Galaxy, only to api.github.com. The password will not be displayed. Use --github-token if you do not want to enter your password. 
GitHub Username: dsmith Password for dsmith: Successfully logged into Galaxy as dsmith When you choose to use your username and password, your password is not sent to Galaxy. It is used to authenticate with GitHub and create a personal access token. It then sends the token to Galaxy, which in turn verifies your identity and returns a Galaxy access token. After authentication completes the GitHub token is destroyed. If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the *--github-token* option to pass a personal access token that you create. Import a role ------------- The ``import`` command requires that you first authenticate using the ``login`` command. Once authenticated you can import any GitHub repository that you own or have been granted access to. Use the following to import a role: :: $ ansible-galaxy import github_user github_repo By default the command will wait for Galaxy to complete the import process, displaying the results as the import progresses: :: Successfully submitted import request 41 Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref= Retrieving GitHub repo githubuser/ansible-role-repo Accessing branch: master Parsing and validating meta/main.yml Parsing galaxy_tags Parsing platforms Adding dependencies Parsing and validating README.md Adding repo tags as role versions Import completed Status SUCCESS : warnings=0 errors=0 Branch ====== Use the *--branch* option to import a specific branch. If not specified, the default branch for the repo will be used. Role name ========= By default the name given to the role will be derived from the GitHub repository name. However, you can use the *--role-name* option to override this and set the name. No wait ======= If the *--no-wait* option is present, the command will not wait for results. Results of the most recent import for any of your roles is available on the Galaxy web site by visiting *My Imports*. 
Delete a role ------------- The ``delete`` command requires that you first authenticate using the ``login`` command. Once authenticated you can remove a role from the Galaxy web site. You are only allowed to remove roles where you have access to the repository in GitHub. Use the following to delete a role: :: $ ansible-galaxy delete github_user github_repo This only removes the role from Galaxy. It does not remove or alter the actual GitHub repository. Travis integrations ------------------- You can create an integration or connection between a role in Galaxy and `Travis <https://travis-ci.org>`_. Once the connection is established, a build in Travis will automatically trigger an import in Galaxy, updating the search index with the latest information about the role. You create the integration using the ``setup`` command, but before an integration can be created, you must first authenticate using the ``login`` command; you will also need an account in Travis, and your Travis token. Once you're ready, use the following command to create the integration: :: $ ansible-galaxy setup travis github_user github_repo xxx-travis-token-xxx The setup command requires your Travis token, however the token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described in `the Travis documentation <https://docs.travis-ci.com/user/notifications/>`_. The hash is stored in Galaxy and used to verify notifications received from Travis. The setup command enables Galaxy to respond to notifications. To configure Travis to run a build on your repository and send a notification, follow the `Travis getting started guide <https://docs.travis-ci.com/user/getting-started/>`_. 
To instruct Travis to notify Galaxy when a build completes, add the following to your .travis.yml file: :: notifications: webhooks: https://galaxy.ansible.com/api/v1/notifications/ List Travis integrations ======================== Use the *--list* option to display your Travis integrations: :: $ ansible-galaxy setup --list ID Source Repo ---------- ---------- ---------- 2 travis github_user/github_repo 1 travis github_user/github_repo Remove Travis integrations ========================== Use the *--remove* option to disable and remove a Travis integration: :: $ ansible-galaxy setup --remove ID Provide the ID of the integration to be disabled. You can find the ID by using the *--list* option. .. seealso:: :ref:`playbooks_reuse_roles` All about ansible roles `Mailing List <https://groups.google.com/group/ansible-project>`_ Questions? Help? Ideas? Stop by the list on Google Groups `irc.freenode.net <http://irc.freenode.net>`_ #ansible IRC chat channel
closed
ansible/ansible
https://github.com/ansible/ansible
58,592
vmware_guest_disk: add disk_mode parameter
<!--- Verify first that your feature was not already discussed on GitHub --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Describe the new feature/improvement briefly below --> vmware_guest_disk should propose `disk_mode` parameter (like vmware_guest module). The playbook bellow does not raise an error but disk is always added in `persistent` mode, even I set `independent_persistent` for `disk_mode` var. If it's a bug please could you fix it? If it's not consider it as a feature request :) ##### ISSUE TYPE - Feature Idea (or maybe a bug) ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> vmware_guest_disk ##### ADDITIONAL INFORMATION <!--- Describe how the feature would be used, why it is needed and what it would solve --> <!--- Paste example playbooks or commands between quotes below --> ```yaml --- - name: get vm {{ target | upper }} facts vmware_guest_facts: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter }}" validate_certs: no name: "{{ target | upper }}" register: vm_facts delegate_to: localhost - name: add disk to vm {{ target | upper }} vmware_guest_disk: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter }}" validate_certs: no name: "{{ target | upper }}" disk: - size_gb: "{{ size_gb }}" type: "{{ type }}" disk_mode: "{{ disk_mode }}" scsi_controller: "{{ scsi_controller }}" unit_number: "{{ unit_number }}" datastore: "{{ vm_facts.instance.hw_datastores | first }}" delegate_to: localhost ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
https://github.com/ansible/ansible/issues/58592
https://github.com/ansible/ansible/pull/60406
1aca1f86b6037def60bf7460c0c702fe7fdd21ac
513bddb44ad2840155cdd66ff42e9b9074632053
2019-07-01T15:03:30Z
python
2019-09-05T15:52:12Z
lib/ansible/modules/cloud/vmware/vmware_guest_disk.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_guest_disk short_description: Manage disks related to virtual machine in given vCenter infrastructure description: - This module can be used to add, remove and update disks belonging to given virtual machine. - All parameters and VMware object names are case sensitive. - This module is destructive in nature, please read documentation carefully before proceeding. - Be careful while removing disk specified as this may lead to data loss. version_added: 2.8 author: - Abhijeet Kasurde (@Akasurde) <[email protected]> notes: - Tested on vSphere 6.0 and 6.5 requirements: - "python >= 2.6" - PyVmomi options: name: description: - Name of the virtual machine. - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied. type: str uuid: description: - UUID of the instance to gather facts if known, this is VMware's unique identifier. - This is a required parameter, if parameter C(name) or C(moid) is not supplied. type: str moid: description: - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance. - This is required if C(name) or C(uuid) is not supplied. version_added: '2.9' type: str folder: description: - Destination folder, absolute or relative path to find an existing guest. - This is a required parameter, only if multiple VMs are found with same name. - The folder should include the datacenter. 
ESX's datacenter is ha-datacenter - 'Examples:' - ' folder: /ha-datacenter/vm' - ' folder: ha-datacenter/vm' - ' folder: /datacenter1/vm' - ' folder: datacenter1/vm' - ' folder: /datacenter1/vm/folder1' - ' folder: datacenter1/vm/folder1' - ' folder: /folder1/datacenter1/vm' - ' folder: folder1/datacenter1/vm' - ' folder: /folder1/datacenter1/vm/folder2' type: str datacenter: description: - The datacenter name to which virtual machine belongs to. required: True type: str use_instance_uuid: description: - Whether to use the VMware instance UUID rather than the BIOS UUID. default: no type: bool version_added: '2.8' disk: description: - A list of disks to add. - The virtual disk related information is provided using this list. - All values and parameters are case sensitive. - 'Valid attributes are:' - ' - C(size[_tb,_gb,_mb,_kb]) (integer): Disk storage size in specified unit.' - ' If C(size) specified then unit must be specified. There is no space allowed in between size number and unit.' - ' Only first occurrence in disk element will be considered, even if there are multiple size* parameters available.' - ' - C(type) (string): Valid values are:' - ' - C(thin) thin disk' - ' - C(eagerzeroedthick) eagerzeroedthick disk' - ' - C(thick) thick disk' - ' Default: C(thick) thick disk, no eagerzero.' - ' - C(datastore) (string): Name of datastore or datastore cluster to be used for the disk.' - ' - C(autoselect_datastore) (bool): Select the less used datastore. Specify only if C(datastore) is not specified.' - ' - C(scsi_controller) (integer): SCSI controller number. Valid value range from 0 to 3.' - ' Only 4 SCSI controllers are allowed per VM.' - ' Care should be taken while specifying C(scsi_controller) is 0 and C(unit_number) as 0 as this disk may contain OS.' - ' - C(unit_number) (integer): Disk Unit Number. Valid value range from 0 to 15. Only 15 disks are allowed per SCSI Controller.' - ' - C(scsi_type) (string): Type of SCSI controller. 
This value is required only for the first occurance of SCSI Controller.' - ' This value is ignored, if SCSI Controller is already present or C(state) is C(absent).' - ' Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual).' - ' C(paravirtual) is default value for this parameter.' - ' - C(state) (string): State of disk. This is either "absent" or "present".' - ' If C(state) is set to C(absent), disk will be removed permanently from virtual machine configuration and from VMware storage.' - ' If C(state) is set to C(present), disk will be added if not present at given SCSI Controller and Unit Number.' - ' If C(state) is set to C(present) and disk exists with different size, disk size is increased.' - ' Reducing disk size is not allowed.' default: [] type: list extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Add disks to virtual machine using UUID vmware_guest_disk: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter_name }}" validate_certs: no uuid: 421e4592-c069-924d-ce20-7e7533fab926 disk: - size_mb: 10 type: thin datastore: datacluster0 state: present scsi_controller: 1 unit_number: 1 scsi_type: 'paravirtual' - size_gb: 10 type: eagerzeroedthick state: present autoselect_datastore: True scsi_controller: 2 scsi_type: 'buslogic' unit_number: 12 - size: 10Gb type: eagerzeroedthick state: present autoselect_datastore: True scsi_controller: 2 scsi_type: 'buslogic' unit_number: 1 delegate_to: localhost register: disk_facts - name: Remove disks from virtual machine using name vmware_guest_disk: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter_name }}" validate_certs: no name: VM_225 disk: - state: absent scsi_controller: 1 unit_number: 1 delegate_to: localhost register: disk_facts - name: Remove disks from virtual machine using moid vmware_guest_disk: 
hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter_name }}" validate_certs: no moid: vm-42 disk: - state: absent scsi_controller: 1 unit_number: 1 delegate_to: localhost register: disk_facts ''' RETURN = """ disk_status: description: metadata about the virtual machine's disks after managing them returned: always type: dict sample: { "0": { "backing_datastore": "datastore2", "backing_disk_mode": "persistent", "backing_eagerlyscrub": false, "backing_filename": "[datastore2] VM_225/VM_225.vmdk", "backing_thinprovisioned": false, "backing_writethrough": false, "capacity_in_bytes": 10485760, "capacity_in_kb": 10240, "controller_key": 1000, "key": 2000, "label": "Hard disk 1", "summary": "10,240 KB", "unit_number": 0 }, } """ import re try: from pyVmomi import vim except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, find_obj, get_all_objs class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) self.desired_disks = self.params['disk'] # Match with vmware_guest parameter self.vm = None self.scsi_device_type = dict(lsilogic=vim.vm.device.VirtualLsiLogicController, paravirtual=vim.vm.device.ParaVirtualSCSIController, buslogic=vim.vm.device.VirtualBusLogicController, lsilogicsas=vim.vm.device.VirtualLsiLogicSASController) self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] def create_scsi_controller(self, scsi_type, scsi_bus_number): """ Create SCSI Controller with given SCSI Type and SCSI Bus Number Args: scsi_type: Type of SCSI scsi_bus_number: SCSI Bus number to be assigned Returns: Virtual device spec for SCSI Controller """ scsi_ctl = vim.vm.device.VirtualDeviceSpec() scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add scsi_ctl.device = 
self.scsi_device_type[scsi_type]() scsi_ctl.device.unitNumber = 3 scsi_ctl.device.busNumber = scsi_bus_number scsi_ctl.device.hotAddRemove = True scsi_ctl.device.sharedBus = 'noSharing' scsi_ctl.device.scsiCtlrUnitNumber = 7 return scsi_ctl @staticmethod def create_scsi_disk(scsi_ctl_key, disk_index): """ Create Virtual Device Spec for virtual disk Args: scsi_ctl_key: Unique SCSI Controller Key disk_index: Disk unit number at which disk needs to be attached Returns: Virtual Device Spec for virtual disk """ disk_spec = vim.vm.device.VirtualDeviceSpec() disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create disk_spec.device = vim.vm.device.VirtualDisk() disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() disk_spec.device.backing.diskMode = 'persistent' disk_spec.device.controllerKey = scsi_ctl_key disk_spec.device.unitNumber = disk_index return disk_spec def reconfigure_vm(self, config_spec, device_type): """ Reconfigure virtual machine after modifying device spec Args: config_spec: Config Spec device_type: Type of device being modified Returns: Boolean status 'changed' and actual task result """ changed, results = (False, '') try: # Perform actual VM reconfiguration task = self.vm.ReconfigVM_Task(spec=config_spec) changed, results = wait_for_task(task) except vim.fault.InvalidDeviceSpec as invalid_device_spec: self.module.fail_json(msg="Failed to manage %s on given virtual machine due to invalid" " device spec : %s" % (device_type, to_native(invalid_device_spec.msg)), details="Please check ESXi server logs for more details.") except vim.fault.RestrictedVersion as e: self.module.fail_json(msg="Failed to reconfigure virtual machine due to" " product versioning restrictions: %s" % to_native(e.msg)) return changed, results def ensure_disks(self, vm_obj=None): """ Manage internal state of virtual machine disks Args: vm_obj: Managed object of virtual machine 
""" # Set vm object self.vm = vm_obj # Sanitize user input disk_data = self.sanitize_disk_inputs() # Create stateful information about SCSI devices current_scsi_info = dict() results = dict(changed=False, disk_data=None, disk_changes=dict()) # Deal with SCSI Controller for device in vm_obj.config.hardware.device: if isinstance(device, tuple(self.scsi_device_type.values())): # Found SCSI device if device.busNumber not in current_scsi_info: device_bus_number = 1000 + device.busNumber current_scsi_info[device_bus_number] = dict(disks=dict()) scsi_changed = False for disk in disk_data: scsi_controller = disk['scsi_controller'] + 1000 if scsi_controller not in current_scsi_info and disk['state'] == 'present': scsi_ctl = self.create_scsi_controller(disk['scsi_type'], disk['scsi_controller']) current_scsi_info[scsi_controller] = dict(disks=dict()) self.config_spec.deviceChange.append(scsi_ctl) scsi_changed = True if scsi_changed: self.reconfigure_vm(self.config_spec, 'SCSI Controller') self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] # Deal with Disks for device in vm_obj.config.hardware.device: if isinstance(device, vim.vm.device.VirtualDisk): # Found Virtual Disk device if device.controllerKey not in current_scsi_info: current_scsi_info[device.controllerKey] = dict(disks=dict()) current_scsi_info[device.controllerKey]['disks'][device.unitNumber] = device vm_name = self.vm.name disk_change_list = [] for disk in disk_data: disk_change = False scsi_controller = disk['scsi_controller'] + 1000 # VMware auto assign 1000 + SCSI Controller if disk['disk_unit_number'] not in current_scsi_info[scsi_controller]['disks'] and disk['state'] == 'present': # Add new disk disk_spec = self.create_scsi_disk(scsi_controller, disk['disk_unit_number']) disk_spec.device.capacityInKB = disk['size'] if disk['disk_type'] == 'thin': disk_spec.device.backing.thinProvisioned = True elif disk['disk_type'] == 'eagerzeroedthick': disk_spec.device.backing.eagerlyScrub = True 
disk_spec.device.backing.fileName = "[%s] %s/%s_%s_%s.vmdk" % (disk['datastore'].name, vm_name, vm_name, str(scsi_controller), str(disk['disk_unit_number'])) disk_spec.device.backing.datastore = disk['datastore'] self.config_spec.deviceChange.append(disk_spec) disk_change = True current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] = disk_spec.device results['disk_changes'][disk['disk_index']] = "Disk created." elif disk['disk_unit_number'] in current_scsi_info[scsi_controller]['disks']: if disk['state'] == 'present': disk_spec = vim.vm.device.VirtualDeviceSpec() # set the operation to edit so that it knows to keep other settings disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] # Edit and no resizing allowed if disk['size'] < disk_spec.device.capacityInKB: self.module.fail_json(msg="Given disk size at disk index [%s] is smaller than found (%d < %d)." " Reducing disks is not allowed." % (disk['disk_index'], disk['size'], disk_spec.device.capacityInKB)) if disk['size'] != disk_spec.device.capacityInKB: disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit disk_spec.device.capacityInKB = disk['size'] self.config_spec.deviceChange.append(disk_spec) disk_change = True results['disk_changes'][disk['disk_index']] = "Disk size increased." else: results['disk_changes'][disk['disk_index']] = "Disk already exists." elif disk['state'] == 'absent': # Disk already exists, deleting disk_spec = vim.vm.device.VirtualDeviceSpec() disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] self.config_spec.deviceChange.append(disk_spec) disk_change = True results['disk_changes'][disk['disk_index']] = "Disk deleted." if disk_change: # Adding multiple disks in a single attempt raises weird errors # So adding single disk at a time. 
self.reconfigure_vm(self.config_spec, 'disks') self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] disk_change_list.append(disk_change) if any(disk_change_list): results['changed'] = True results['disk_data'] = self.gather_disk_facts(vm_obj=self.vm) self.module.exit_json(**results) def sanitize_disk_inputs(self): """ Check correctness of disk input provided by user Returns: A list of dictionary containing disk information """ disks_data = list() if not self.desired_disks: self.module.exit_json(changed=False, msg="No disks provided for virtual" " machine '%s' for management." % self.vm.name) for disk_index, disk in enumerate(self.desired_disks): # Initialize default value for disk current_disk = dict(disk_index=disk_index, state='present', datastore=None, autoselect_datastore=True, disk_unit_number=0, scsi_controller=0) # Check state if 'state' in disk: if disk['state'] not in ['absent', 'present']: self.module.fail_json(msg="Invalid state provided '%s' for disk index [%s]." 
" State can be either - 'absent', 'present'" % (disk['state'], disk_index)) else: current_disk['state'] = disk['state'] if current_disk['state'] == 'present': # Select datastore or datastore cluster if 'datastore' in disk: if 'autoselect_datastore' in disk: self.module.fail_json(msg="Please specify either 'datastore' " "or 'autoselect_datastore' for disk index [%s]" % disk_index) # Check if given value is datastore or datastore cluster datastore_name = disk['datastore'] datastore_cluster = find_obj(self.content, [vim.StoragePod], datastore_name) if datastore_cluster: # If user specified datastore cluster so get recommended datastore datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) # Check if get_recommended_datastore or user specified datastore exists or not datastore = find_obj(self.content, [vim.Datastore], datastore_name) if datastore is None: self.module.fail_json(msg="Failed to find datastore named '%s' " "in given configuration." % disk['datastore']) current_disk['datastore'] = datastore current_disk['autoselect_datastore'] = False elif 'autoselect_datastore' in disk: # Find datastore which fits requirement datastores = get_all_objs(self.content, [vim.Datastore]) if not datastores: self.module.fail_json(msg="Failed to gather information about" " available datastores in given datacenter.") datastore = None datastore_freespace = 0 for ds in datastores: if ds.summary.freeSpace > datastore_freespace: # If datastore field is provided, filter destination datastores datastore = ds datastore_freespace = ds.summary.freeSpace current_disk['datastore'] = datastore if 'datastore' not in disk and 'autoselect_datastore' not in disk: self.module.fail_json(msg="Either 'datastore' or 'autoselect_datastore' is" " required parameter while creating disk for " "disk index [%s]." 
% disk_index) if [x for x in disk.keys() if x.startswith('size_') or x == 'size']: # size, size_tb, size_gb, size_mb, size_kb disk_size_parse_failed = False if 'size' in disk: size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])') disk_size_m = size_regex.match(disk['size']) if disk_size_m: expected = disk_size_m.group(1) unit = disk_size_m.group(2) else: disk_size_parse_failed = True try: if re.match(r'\d+\.\d+', expected): # We found float value in string, let's typecast it expected = float(expected) else: # We found int value in string, let's typecast it expected = int(expected) except (TypeError, ValueError, NameError): disk_size_parse_failed = True else: # Even multiple size_ parameter provided by user, # consider first value only param = [x for x in disk.keys() if x.startswith('size_')][0] unit = param.split('_')[-1] disk_size = disk[param] if isinstance(disk_size, (float, int)): disk_size = str(disk_size) try: if re.match(r'\d+\.\d+', disk_size): # We found float value in string, let's typecast it expected = float(disk_size) else: # We found int value in string, let's typecast it expected = int(disk_size) except (TypeError, ValueError, NameError): disk_size_parse_failed = True if disk_size_parse_failed: # Common failure self.module.fail_json(msg="Failed to parse disk size for disk index [%s]," " please review value provided" " using documentation." % disk_index) disk_units = dict(tb=3, gb=2, mb=1, kb=0) unit = unit.lower() if unit in disk_units: current_disk['size'] = expected * (1024 ** disk_units[unit]) else: self.module.fail_json(msg="%s is not a supported unit for disk size for disk index [%s]." " Supported units are ['%s']." % (unit, disk_index, "', '".join(disk_units.keys()))) else: # No size found but disk, fail self.module.fail_json(msg="No size, size_kb, size_mb, size_gb or size_tb" " attribute found into disk index [%s] configuration." 
% disk_index) # Check SCSI controller key if 'scsi_controller' in disk: try: temp_disk_controller = int(disk['scsi_controller']) except ValueError: self.module.fail_json(msg="Invalid SCSI controller ID '%s' specified" " at index [%s]" % (disk['scsi_controller'], disk_index)) if temp_disk_controller not in range(0, 4): # Only 4 SCSI controllers are allowed per VM self.module.fail_json(msg="Invalid SCSI controller ID specified [%s]," " please specify value between 0 to 3 only." % temp_disk_controller) current_disk['scsi_controller'] = temp_disk_controller else: self.module.fail_json(msg="Please specify 'scsi_controller' under disk parameter" " at index [%s], which is required while creating disk." % disk_index) # Check for disk unit number if 'unit_number' in disk: try: temp_disk_unit_number = int(disk['unit_number']) except ValueError: self.module.fail_json(msg="Invalid Disk unit number ID '%s'" " specified at index [%s]" % (disk['unit_number'], disk_index)) if temp_disk_unit_number not in range(0, 16): self.module.fail_json(msg="Invalid Disk unit number ID specified for disk [%s] at index [%s]," " please specify value between 0 to 15" " only (excluding 7)." % (temp_disk_unit_number, disk_index)) if temp_disk_unit_number == 7: self.module.fail_json(msg="Invalid Disk unit number ID specified for disk at index [%s]," " please specify value other than 7 as it is reserved" "for SCSI Controller" % disk_index) current_disk['disk_unit_number'] = temp_disk_unit_number else: self.module.fail_json(msg="Please specify 'unit_number' under disk parameter" " at index [%s], which is required while creating disk." % disk_index) # Type of Disk disk_type = disk.get('type', 'thick').lower() if disk_type not in ['thin', 'thick', 'eagerzeroedthick']: self.module.fail_json(msg="Invalid 'disk_type' specified for disk index [%s]. Please specify" " 'disk_type' value from ['thin', 'thick', 'eagerzeroedthick']." 
% disk_index) current_disk['disk_type'] = disk_type # SCSI Controller Type scsi_contrl_type = disk.get('scsi_type', 'paravirtual').lower() if scsi_contrl_type not in self.scsi_device_type.keys(): self.module.fail_json(msg="Invalid 'scsi_type' specified for disk index [%s]. Please specify" " 'scsi_type' value from ['%s']" % (disk_index, "', '".join(self.scsi_device_type.keys()))) current_disk['scsi_type'] = scsi_contrl_type disks_data.append(current_disk) return disks_data def get_recommended_datastore(self, datastore_cluster_obj): """ Return Storage DRS recommended datastore from datastore cluster Args: datastore_cluster_obj: datastore cluster managed object Returns: Name of recommended datastore from the given datastore cluster, Returns None if no datastore recommendation found. """ # Check if Datastore Cluster provided by user is SDRS ready sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled if sdrs_status: # We can get storage recommendation only if SDRS is enabled on given datastorage cluster pod_sel_spec = vim.storageDrs.PodSelectionSpec() pod_sel_spec.storagePod = datastore_cluster_obj storage_spec = vim.storageDrs.StoragePlacementSpec() storage_spec.podSelectionSpec = pod_sel_spec storage_spec.type = 'create' try: rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) rec_action = rec.recommendations[0].action[0] return rec_action.destination.name except Exception: # There is some error so we fall back to general workflow pass datastore = None datastore_freespace = 0 for ds in datastore_cluster_obj.childEntity: if ds.summary.freeSpace > datastore_freespace: # If datastore field is provided, filter destination datastores datastore = ds datastore_freespace = ds.summary.freeSpace if datastore: return datastore.name return None @staticmethod def gather_disk_facts(vm_obj): """ Gather facts about VM's disks Args: vm_obj: Managed object of virtual machine Returns: A list of dict containing disks 
information """ disks_facts = dict() if vm_obj is None: return disks_facts disk_index = 0 for disk in vm_obj.config.hardware.device: if isinstance(disk, vim.vm.device.VirtualDisk): disks_facts[disk_index] = dict( key=disk.key, label=disk.deviceInfo.label, summary=disk.deviceInfo.summary, backing_filename=disk.backing.fileName, backing_datastore=disk.backing.datastore.name, backing_disk_mode=disk.backing.diskMode, backing_writethrough=disk.backing.writeThrough, backing_thinprovisioned=disk.backing.thinProvisioned, backing_eagerlyscrub=bool(disk.backing.eagerlyScrub), controller_key=disk.controllerKey, unit_number=disk.unitNumber, capacity_in_kb=disk.capacityInKB, capacity_in_bytes=disk.capacityInBytes, ) disk_index += 1 return disks_facts def main(): argument_spec = vmware_argument_spec() argument_spec.update( name=dict(type='str'), uuid=dict(type='str'), moid=dict(type='str'), folder=dict(type='str'), datacenter=dict(type='str', required=True), disk=dict(type='list', default=[]), use_instance_uuid=dict(type='bool', default=False), ) module = AnsibleModule( argument_spec=argument_spec, required_one_of=[ ['name', 'uuid', 'moid'] ] ) if module.params['folder']: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified module.params['folder'] = module.params['folder'].rstrip('/') pyv = PyVmomiHelper(module) # Check if the VM exists before continuing vm = pyv.get_vm() if not vm: # We unable to find the virtual machine user specified # Bail out vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid')) module.fail_json(msg="Unable to manage disks for non-existing" " virtual machine '%s'." % vm_id) # VM exists try: pyv.ensure_disks(vm_obj=vm) except Exception as exc: module.fail_json(msg="Failed to manage disks for virtual machine" " '%s' with exception : %s" % (vm.name, to_native(exc))) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
58,592
vmware_guest_disk: add disk_mode parameter
<!--- Verify first that your feature was not already discussed on GitHub --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Describe the new feature/improvement briefly below --> vmware_guest_disk should propose `disk_mode` parameter (like vmware_guest module). The playbook bellow does not raise an error but disk is always added in `persistent` mode, even I set `independent_persistent` for `disk_mode` var. If it's a bug please could you fix it? If it's not consider it as a feature request :) ##### ISSUE TYPE - Feature Idea (or maybe a bug) ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> vmware_guest_disk ##### ADDITIONAL INFORMATION <!--- Describe how the feature would be used, why it is needed and what it would solve --> <!--- Paste example playbooks or commands between quotes below --> ```yaml --- - name: get vm {{ target | upper }} facts vmware_guest_facts: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter }}" validate_certs: no name: "{{ target | upper }}" register: vm_facts delegate_to: localhost - name: add disk to vm {{ target | upper }} vmware_guest_disk: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter }}" validate_certs: no name: "{{ target | upper }}" disk: - size_gb: "{{ size_gb }}" type: "{{ type }}" disk_mode: "{{ disk_mode }}" scsi_controller: "{{ scsi_controller }}" unit_number: "{{ unit_number }}" datastore: "{{ vm_facts.instance.hw_datastores | first }}" delegate_to: localhost ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
https://github.com/ansible/ansible/issues/58592
https://github.com/ansible/ansible/pull/60406
1aca1f86b6037def60bf7460c0c702fe7fdd21ac
513bddb44ad2840155cdd66ff42e9b9074632053
2019-07-01T15:03:30Z
python
2019-09-05T15:52:12Z
test/integration/targets/vmware_guest_disk/aliases
closed
ansible/ansible
https://github.com/ansible/ansible
58,592
vmware_guest_disk: add disk_mode parameter
<!--- Verify first that your feature was not already discussed on GitHub --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Describe the new feature/improvement briefly below --> vmware_guest_disk should propose `disk_mode` parameter (like vmware_guest module). The playbook bellow does not raise an error but disk is always added in `persistent` mode, even I set `independent_persistent` for `disk_mode` var. If it's a bug please could you fix it? If it's not consider it as a feature request :) ##### ISSUE TYPE - Feature Idea (or maybe a bug) ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> vmware_guest_disk ##### ADDITIONAL INFORMATION <!--- Describe how the feature would be used, why it is needed and what it would solve --> <!--- Paste example playbooks or commands between quotes below --> ```yaml --- - name: get vm {{ target | upper }} facts vmware_guest_facts: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter }}" validate_certs: no name: "{{ target | upper }}" register: vm_facts delegate_to: localhost - name: add disk to vm {{ target | upper }} vmware_guest_disk: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" datacenter: "{{ datacenter }}" validate_certs: no name: "{{ target | upper }}" disk: - size_gb: "{{ size_gb }}" type: "{{ type }}" disk_mode: "{{ disk_mode }}" scsi_controller: "{{ scsi_controller }}" unit_number: "{{ unit_number }}" datastore: "{{ vm_facts.instance.hw_datastores | first }}" delegate_to: localhost ``` <!--- HINT: You can also paste gist.github.com links for larger files -->
https://github.com/ansible/ansible/issues/58592
https://github.com/ansible/ansible/pull/60406
1aca1f86b6037def60bf7460c0c702fe7fdd21ac
513bddb44ad2840155cdd66ff42e9b9074632053
2019-07-01T15:03:30Z
python
2019-09-05T15:52:12Z
test/integration/targets/vmware_guest_disk/tasks/main.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,876
Nxos privilage escalation not working properly
##### SUMMARY I found another issue that is similar to the issue reported in https://github.com/ansible/ansible/issues/61568. In this case if I run the playbook described below I encounter the following issue: ``` TASK [nxos_become : run commands with become] *********************************************************************************************************************************************************** task path: /root/agents-ci/ansible/test/integration/targets/nxos_become/tests/cli/sanity.yaml:7 <n3k.example.com> attempting to start connection <n3k.example.com> using connection plugin network_cli <n3k.example.com> found existing local domain socket, using it! <n3k.example.com> updating play_context for connection <n3k.example.com> The full traceback is: Traceback (most recent call last): File "/root/agents-ci/ansible/bin/ansible-connection", line 300, in main conn.update_play_context(pc_data) File "/root/agents-ci/ansible/lib/ansible/module_utils/connection.py", line 185, in __rpc__ raise ConnectionError(to_text(msg, errors='surrogate_then_replace'), code=code) ConnectionError: 'NoneType' object has no attribute 'endswith' fatal: [n3k-priv]: FAILED! 
=> { "msg": "'NoneType' object has no attribute 'endswith'" } ``` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME nxos ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.10.0.dev0 config file = /root/agents-ci/ansible/test/integration/ansible.cfg configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /root/agents-ci/ansible/lib/ansible executable location = /root/agents-ci/ansible/bin/ansible python version = 2.7.12 (default, Dec 4 2017, 14:50:18) [GCC 5.4.0 20160609] ``` ##### STEPS TO REPRODUCE Just like I describe in https://github.com/ansible/ansible/issues/61568, if I checkout the commit right before https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 I don't see the problem. I decided to open a separate issue for this though because if I patch in the potential fix (https://github.com/ansible/ansible/pull/61570) I still see the issue. ##### EXPECTED RESULTS Test should pass on all NXOS platforms. ##### ACTUAL RESULTS Test fails with error message ``` Traceback (most recent call last): File "/root/agents-ci/ansible/bin/ansible-connection", line 300, in main conn.update_play_context(pc_data) File "/root/agents-ci/ansible/lib/ansible/module_utils/connection.py", line 185, in __rpc__ raise ConnectionError(to_text(msg, errors='surrogate_then_replace'), code=code) ConnectionError: 'NoneType' object has no attribute 'endswith' ``` Here is the playbook that exposes the issue. 
Command to execute the playbook: `ansible-playbook -i ~/hosts nxos.yaml -vvvv` **nxos.yaml** ```yaml --- # This first play configures a new user called test_ansible and sets # the enable secret - name: Setup test user for role validation hosts: nxos gather_facts: no connection: network_cli tasks: - set_fact: nxos_priv_host="{{ hostvars[inventory_hostname]["groups"]["nxos_privilage_user"][0] }}" - set_fact: priv_password="{{ hostvars[nxos_priv_host]["ansible_ssh_pass"] }}" - name: Enable Feature Privilege nxos_config: lines: - feature privilege - name: Configure user ansible_test with role priv-14 nxos_config: lines: - no username test_ansible - "username test_ansible password {{ priv_password }} role priv-14" - enable secret 0 cisco # This second play uses the nxos_become role to verify that ansible # can login as the test user created in the first play and then use # become to enable a higher privilage level - name: Verify privliage role escalation using become hosts: nxos_privilage_user gather_facts: no connection: local vars: debug: false roles: - { role: nxos_become } # This third and final play is used to cleanup the test user - name: Cleanup test user hosts: nxos gather_facts: no connection: network_cli tasks: - name: Remove user ansible_test nxos_config: lines: - no username test_ansible - no enable secret ``` **nxos_become role** Everything committed into the current `devel` role but replace `targets/nxos_become/tests/cli/sanity.yaml` with: ```yaml --- - debug: msg="START connection={{ ansible_connection }}/sanity.yaml" - debug: msg="Using provider={{ connection.transport }}/sanity.yaml" when: ansible_connection == "local" - block: - name: run commands with become nxos_command: commands: 'show privilege' become: yes register: result - assert: that: - "'Current privilege level: 15' in result['stdout'][0]" - debug: msg="END connection={{ ansible_connection }}/sanity.yaml" ```
https://github.com/ansible/ansible/issues/61876
https://github.com/ansible/ansible/pull/61797
394a05108dc79efc8d917fa0febbfcf721e94b61
a365e77cc350ff5ce2dfaf421d7a3f54152cf187
2019-09-05T18:09:22Z
python
2019-09-05T18:48:16Z
lib/ansible/plugins/connection/network_cli.py
# (c) 2016 Red Hat Inc. # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ --- author: Ansible Networking Team connection: network_cli short_description: Use network_cli to run command on network appliances description: - This connection plugin provides a connection to remote devices over the SSH and implements a CLI shell. This connection plugin is typically used by network devices for sending and receiving CLi commands to network devices. version_added: "2.3" options: host: description: - Specifies the remote device FQDN or IP address to establish the SSH connection to. default: inventory_hostname vars: - name: ansible_host port: type: int description: - Specifies the port on the remote device that listens for connections when establishing the SSH connection. default: 22 ini: - section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port network_os: description: - Configures the device platform network operating system. This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. vars: - name: ansible_network_os remote_user: description: - The username used to authenticate to the remote device when the SSH connection is first established. If the remote_user is not specified, the connection will use the username of the logged in user. - Can be configured from the CLI via the C(--user) or C(-u) options. ini: - section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: - Configures the user password used to authenticate to the remote device when first establishing the SSH connection. 
vars: - name: ansible_password - name: ansible_ssh_pass - name: ansible_ssh_password private_key_file: description: - The private SSH key or certificate file used to authenticate to the remote device when first establishing the SSH connection. ini: - section: defaults key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file timeout: type: int description: - Sets the connection time, in seconds, for communicating with the remote device. This timeout is used as the default timeout value for commands when issuing a command to the network CLI. If the command does not return in timeout seconds, an error is generated. default: 120 become: type: boolean description: - The become option will instruct the CLI session to attempt privilege escalation on platforms that support it. Normally this means transitioning from user mode to C(enable) mode in the CLI session. If become is set to True and the remote device does not support privilege escalation or the privilege has already been elevated, then this option is silently ignored. - Can be configured from the CLI via the C(--become) or C(-b) options. default: False ini: - section: privilege_escalation key: become env: - name: ANSIBLE_BECOME vars: - name: ansible_become become_method: description: - This option allows the become method to be specified in for handling privilege escalation. Typically the become_method value is set to C(enable) but could be defined as other values. default: sudo ini: - section: privilege_escalation key: become_method env: - name: ANSIBLE_BECOME_METHOD vars: - name: ansible_become_method host_key_auto_add: type: boolean description: - By default, Ansible will prompt the user before adding SSH keys to the known hosts file. Since persistent connections such as network_cli run in background processes, the user will never be prompted. By enabling this option, unknown host keys will automatically be added to the known hosts file. 
- Be sure to fully understand the security implications of enabling this option on production systems as it could create a security vulnerability. default: False ini: - section: paramiko_connection key: host_key_auto_add env: - name: ANSIBLE_HOST_KEY_AUTO_ADD persistent_connect_timeout: type: int description: - Configures, in seconds, the amount of time to wait when trying to initially establish a persistent connection. If this value expires before the connection to the remote device is completed, the connection will fail. default: 30 ini: - section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures, in seconds, the amount of time to wait for a command to return from the remote device. If this timer is exceeded before the command returns, the connection plugin will raise an exception and close. default: 30 ini: - section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_buffer_read_timeout: type: float description: - Configures, in seconds, the amount of time to wait for the data to be read from Paramiko channel after the command prompt is matched. This timeout value ensures that command prompt matched is correct and there is no more data left to be received from remote host. default: 0.1 ini: - section: persistent_connection key: buffer_read_timeout env: - name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT vars: - name: ansible_buffer_read_timeout persistent_log_messages: type: boolean description: - This flag will enable logging the command executed and response received from target device in the ansible log file. For this option to work 'log_path' ansible configuration option is required to be set to a file path with write access. 
- Be sure to fully understand the security implications of enabling this option as it could create a security vulnerability by logging sensitive information in log file. default: False ini: - section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages terminal_stdout_re: type: list elements: dict version_added: '2.9' description: - A single regex pattern or a sequence of patterns along with optional flags to match the command prompt from the received response chunk. This option accepts C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern to match the response and the value of C(flags) is the value accepted by I(flags) argument of I(re.compile) python method to control the way regex is matched with the response, for example I('re.I'). vars: - name: ansible_terminal_stdout_re terminal_stderr_re: type: list elements: dict version_added: '2.9' description: - This option provides the regex pattern and optional flags to match the error string from the received response chunk. This option accepts C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern to match the response and the value of C(flags) is the value accepted by I(flags) argument of I(re.compile) python method to control the way regex is matched with the response, for example I('re.I'). vars: - name: ansible_terminal_stderr_re terminal_initial_prompt: type: list version_added: '2.9' description: - A single regex pattern or a sequence of patterns to evaluate the expected prompt at the time of initial login to the remote host. vars: - name: ansible_terminal_initial_prompt terminal_initial_answer: type: list version_added: '2.9' description: - The answer to reply with if the C(terminal_initial_prompt) is matched. The value can be a single answer or a list of answers for multiple terminal_initial_prompt. 
In case the login menu has multiple prompts the sequence of the prompt and excepted answer should be in same order and the value of I(terminal_prompt_checkall) should be set to I(True) if all the values in C(terminal_initial_prompt) are expected to be matched and set to I(False) if any one login prompt is to be matched. vars: - name: ansible_terminal_initial_answer terminal_initial_prompt_checkall: type: boolean version_added: '2.9' description: - By default the value is set to I(False) and any one of the prompts mentioned in C(terminal_initial_prompt) option is matched it won't check for other prompts. When set to I(True) it will check for all the prompts mentioned in C(terminal_initial_prompt) option in the given order and all the prompts should be received from remote host if not it will result in timeout. default: False vars: - name: ansible_terminal_initial_prompt_checkall terminal_inital_prompt_newline: type: boolean version_added: '2.9' description: - This boolean flag, that when set to I(True) will send newline in the response if any of values in I(terminal_initial_prompt) is matched. default: True vars: - name: ansible_terminal_initial_prompt_newline network_cli_retries: description: - Number of attempts to connect to remote host. The delay time between the retires increases after every attempt by power of 2 in seconds till either the maximum attempts are exhausted or any of the C(persistent_command_timeout) or C(persistent_connect_timeout) timers are triggered. 
default: 3 version_added: '2.9' type: integer env: - name: ANSIBLE_NETWORK_CLI_RETRIES ini: - section: persistent_connection key: network_cli_retries vars: - name: ansible_network_cli_retries """ import getpass import json import logging import re import os import signal import socket import time import traceback from io import BytesIO from ansible.errors import AnsibleConnectionFailure from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves import cPickle from ansible.module_utils.network.common.utils import to_list from ansible.module_utils._text import to_bytes, to_text from ansible.playbook.play_context import PlayContext from ansible.plugins.connection import NetworkConnectionBase, ensure_connect from ansible.plugins.loader import cliconf_loader, terminal_loader, connection_loader class AnsibleCmdRespRecv(Exception): pass class Connection(NetworkConnectionBase): ''' CLI (shell) SSH connections on Paramiko ''' transport = 'network_cli' has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) self._ssh_shell = None self._matched_prompt = None self._matched_cmd_prompt = None self._matched_pattern = None self._last_response = None self._history = list() self._command_response = None self._terminal = None self.cliconf = None self.paramiko_conn = None if self._play_context.verbosity > 3: logging.getLogger('paramiko').setLevel(logging.DEBUG) if self._network_os: self._terminal = terminal_loader.get(self._network_os, self) if not self._terminal: raise AnsibleConnectionFailure('network os %s is not supported' % self._network_os) self.cliconf = cliconf_loader.get(self._network_os, self) if self.cliconf: self.queue_message('vvvv', 'loaded cliconf plugin for network_os %s' % self._network_os) self._sub_plugin = {'type': 'cliconf', 'name': self._network_os, 'obj': self.cliconf} else: self.queue_message('vvvv', 'unable to load cliconf for network_os %s' 
% self._network_os) else: raise AnsibleConnectionFailure( 'Unable to automatically determine host network os. Please ' 'manually configure ansible_network_os value for this host' ) self.queue_message('log', 'network_os is set to %s' % self._network_os) def _get_log_channel(self): name = "p=%s u=%s | " % (os.getpid(), getpass.getuser()) name += "paramiko [%s]" % self._play_context.remote_addr return name def get_prompt(self): """Returns the current prompt from the device""" return self._matched_prompt def exec_command(self, cmd, in_data=None, sudoable=True): # this try..except block is just to handle the transition to supporting # network_cli as a toplevel connection. Once connection=local is gone, # this block can be removed as well and all calls passed directly to # the local connection if self._ssh_shell: try: cmd = json.loads(to_text(cmd, errors='surrogate_or_strict')) kwargs = {'command': to_bytes(cmd['command'], errors='surrogate_or_strict')} for key in ('prompt', 'answer', 'sendonly', 'newline', 'prompt_retry_check'): if cmd.get(key) is True or cmd.get(key) is False: kwargs[key] = cmd[key] elif cmd.get(key) is not None: kwargs[key] = to_bytes(cmd[key], errors='surrogate_or_strict') return self.send(**kwargs) except ValueError: cmd = to_bytes(cmd, errors='surrogate_or_strict') return self.send(command=cmd) else: return super(Connection, self).exec_command(cmd, in_data, sudoable) def update_play_context(self, pc_data): """Updates the play context information for the connection""" pc_data = to_bytes(pc_data) if PY3: pc_data = cPickle.loads(pc_data, encoding='bytes') else: pc_data = cPickle.loads(pc_data) play_context = PlayContext() play_context.deserialize(pc_data) self.queue_message('vvvv', 'updating play_context for connection') if self._play_context.become ^ play_context.become: if play_context.become is True: auth_pass = play_context.become_pass self._terminal.on_become(passwd=auth_pass) self.queue_message('vvvv', 'authorizing connection') else: 
self._terminal.on_unbecome() self.queue_message('vvvv', 'deauthorizing connection') self._play_context = play_context if hasattr(self, 'reset_history'): self.reset_history() if hasattr(self, 'disable_response_logging'): self.disable_response_logging() def _connect(self): ''' Connects to the remote device and starts the terminal ''' if not self.connected: self.paramiko_conn = connection_loader.get('paramiko', self._play_context, '/dev/null') self.paramiko_conn._set_log_channel(self._get_log_channel()) self.paramiko_conn.set_options(direct={'look_for_keys': not bool(self._play_context.password and not self._play_context.private_key_file)}) self.paramiko_conn.force_persistence = self.force_persistence command_timeout = self.get_option('persistent_command_timeout') max_pause = min([self.get_option('persistent_connect_timeout'), command_timeout]) retries = self.get_option('network_cli_retries') total_pause = 0 for attempt in range(retries + 1): try: ssh = self.paramiko_conn._connect() break except Exception as e: pause = 2 ** (attempt + 1) if attempt == retries or total_pause >= max_pause: raise AnsibleConnectionFailure(to_text(e, errors='surrogate_or_strict')) else: msg = (u"network_cli_retry: attempt: %d, caught exception(%s), " u"pausing for %d seconds" % (attempt + 1, to_text(e, errors='surrogate_or_strict'), pause)) self.queue_message('vv', msg) time.sleep(pause) total_pause += pause continue self.queue_message('vvvv', 'ssh connection done, setting terminal') self._connected = True self._ssh_shell = ssh.ssh.invoke_shell() self._ssh_shell.settimeout(command_timeout) self.queue_message('vvvv', 'loaded terminal plugin for network_os %s' % self._network_os) terminal_initial_prompt = self.get_option('terminal_initial_prompt') or self._terminal.terminal_initial_prompt terminal_initial_answer = self.get_option('terminal_initial_answer') or self._terminal.terminal_initial_answer newline = self.get_option('terminal_inital_prompt_newline') or 
self._terminal.terminal_inital_prompt_newline check_all = self.get_option('terminal_initial_prompt_checkall') or False self.receive(prompts=terminal_initial_prompt, answer=terminal_initial_answer, newline=newline, check_all=check_all) self.queue_message('vvvv', 'firing event: on_open_shell()') self._terminal.on_open_shell() if self._play_context.become and self._play_context.become_method == 'enable': self.queue_message('vvvv', 'firing event: on_become') auth_pass = self._play_context.become_pass self._terminal.on_become(passwd=auth_pass) self.queue_message('vvvv', 'ssh connection has completed successfully') return self def close(self): ''' Close the active connection to the device ''' # only close the connection if its connected. if self._connected: self.queue_message('debug', "closing ssh connection to device") if self._ssh_shell: self.queue_message('debug', "firing event: on_close_shell()") self._terminal.on_close_shell() self._ssh_shell.close() self._ssh_shell = None self.queue_message('debug', "cli session is now closed") self.paramiko_conn.close() self.paramiko_conn = None self.queue_message('debug', "ssh connection has been closed successfully") super(Connection, self).close() def receive(self, command=None, prompts=None, answer=None, newline=True, prompt_retry_check=False, check_all=False): ''' Handles receiving of output from command ''' self._matched_prompt = None self._matched_cmd_prompt = None recv = BytesIO() handled = False command_prompt_matched = False matched_prompt_window = window_count = 0 # set terminal regex values for command prompt and errors in response self._terminal_stderr_re = self._get_terminal_std_re('terminal_stderr_re') self._terminal_stdout_re = self._get_terminal_std_re('terminal_stdout_re') cache_socket_timeout = self._ssh_shell.gettimeout() command_timeout = self.get_option('persistent_command_timeout') self._validate_timeout_value(command_timeout, "persistent_command_timeout") if cache_socket_timeout != command_timeout: 
self._ssh_shell.settimeout(command_timeout) buffer_read_timeout = self.get_option('persistent_buffer_read_timeout') self._validate_timeout_value(buffer_read_timeout, "persistent_buffer_read_timeout") self._log_messages("command: %s" % command) while True: if command_prompt_matched: try: signal.signal(signal.SIGALRM, self._handle_buffer_read_timeout) signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout) data = self._ssh_shell.recv(256) signal.alarm(0) self._log_messages("response-%s: %s" % (window_count + 1, data)) # if data is still received on channel it indicates the prompt string # is wrongly matched in between response chunks, continue to read # remaining response. command_prompt_matched = False # restart command_timeout timer signal.signal(signal.SIGALRM, self._handle_command_timeout) signal.alarm(command_timeout) except AnsibleCmdRespRecv: # reset socket timeout to global timeout self._ssh_shell.settimeout(cache_socket_timeout) return self._command_response else: data = self._ssh_shell.recv(256) self._log_messages("response-%s: %s" % (window_count + 1, data)) # when a channel stream is closed, received data will be empty if not data: break recv.write(data) offset = recv.tell() - 256 if recv.tell() > 256 else 0 recv.seek(offset) window = self._strip(recv.read()) window_count += 1 if prompts and not handled: handled = self._handle_prompt(window, prompts, answer, newline, False, check_all) matched_prompt_window = window_count elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count: # check again even when handled, if same prompt repeats in next window # (like in the case of a wrong enable password, etc) indicates # value of answer is wrong, report this as error. 
if self._handle_prompt(window, prompts, answer, newline, prompt_retry_check, check_all): raise AnsibleConnectionFailure("For matched prompt '%s', answer is not valid" % self._matched_cmd_prompt) if self._find_prompt(window): self._last_response = recv.getvalue() resp = self._strip(self._last_response) self._command_response = self._sanitize(resp, command) if buffer_read_timeout == 0.0: # reset socket timeout to global timeout self._ssh_shell.settimeout(cache_socket_timeout) return self._command_response else: command_prompt_matched = True @ensure_connect def send(self, command, prompt=None, answer=None, newline=True, sendonly=False, prompt_retry_check=False, check_all=False): ''' Sends the command to the device in the opened shell ''' if check_all: prompt_len = len(to_list(prompt)) answer_len = len(to_list(answer)) if prompt_len != answer_len: raise AnsibleConnectionFailure("Number of prompts (%s) is not same as that of answers (%s)" % (prompt_len, answer_len)) try: cmd = b'%s\r' % command self._history.append(cmd) self._ssh_shell.sendall(cmd) self._log_messages('send command: %s' % cmd) if sendonly: return response = self.receive(command, prompt, answer, newline, prompt_retry_check, check_all) return to_text(response, errors='surrogate_or_strict') except (socket.timeout, AttributeError): self.queue_message('error', traceback.format_exc()) raise AnsibleConnectionFailure("timeout value %s seconds reached while trying to send command: %s" % (self._ssh_shell.gettimeout(), command.strip())) def _handle_buffer_read_timeout(self, signum, frame): self.queue_message('vvvv', "Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds" % self.get_option('persistent_buffer_read_timeout')) raise AnsibleCmdRespRecv() def _handle_command_timeout(self, signum, frame): msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\ % self.get_option('persistent_command_timeout') 
self.queue_message('log', msg) raise AnsibleConnectionFailure(msg) def _strip(self, data): ''' Removes ANSI codes from device response ''' for regex in self._terminal.ansi_re: data = regex.sub(b'', data) return data def _handle_prompt(self, resp, prompts, answer, newline, prompt_retry_check=False, check_all=False): ''' Matches the command prompt and responds :arg resp: Byte string containing the raw response from the remote :arg prompts: Sequence of byte strings that we consider prompts for input :arg answer: Sequence of Byte string to send back to the remote if we find a prompt. A carriage return is automatically appended to this string. :param prompt_retry_check: Bool value for trying to detect more prompts :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of given prompt. :returns: True if a prompt was found in ``resp``. If check_all is True will True only after all the prompt in the prompts list are matched. False otherwise. ''' single_prompt = False if not isinstance(prompts, list): prompts = [prompts] single_prompt = True if not isinstance(answer, list): answer = [answer] prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts] for index, regex in enumerate(prompts_regex): match = regex.search(resp) if match: self._matched_cmd_prompt = match.group() self._log_messages("matched command prompt: %s" % self._matched_cmd_prompt) # if prompt_retry_check is enabled to check if same prompt is # repeated don't send answer again. 
if not prompt_retry_check: prompt_answer = answer[index] if len(answer) > index else answer[0] self._ssh_shell.sendall(b'%s' % prompt_answer) if newline: self._ssh_shell.sendall(b'\r') prompt_answer += b'\r' self._log_messages("matched command prompt answer: %s" % prompt_answer) if check_all and prompts and not single_prompt: prompts.pop(0) answer.pop(0) return False return True return False def _sanitize(self, resp, command=None): ''' Removes elements from the response before returning to the caller ''' cleaned = [] for line in resp.splitlines(): if command and line.strip() == command.strip(): continue for prompt in self._matched_prompt.strip().splitlines(): if prompt.strip() in line: break else: cleaned.append(line) return b'\n'.join(cleaned).strip() def _find_prompt(self, response): '''Searches the buffered response for a matching command prompt ''' errored_response = None is_error_message = False for regex in self._terminal_stderr_re: if regex.search(response): is_error_message = True # Check if error response ends with command prompt if not # receive it buffered prompt for regex in self._terminal_stdout_re: match = regex.search(response) if match: errored_response = response self._matched_pattern = regex.pattern self._matched_prompt = match.group() self._log_messages("matched error regex '%s' from response '%s'" % (self._matched_pattern, errored_response)) break if not is_error_message: for regex in self._terminal_stdout_re: match = regex.search(response) if match: self._matched_pattern = regex.pattern self._matched_prompt = match.group() self._log_messages("matched cli prompt '%s' with regex '%s' from response '%s'" % (self._matched_prompt, self._matched_pattern, response)) if not errored_response: return True if errored_response: raise AnsibleConnectionFailure(errored_response) return False def _validate_timeout_value(self, timeout, timer_name): if timeout < 0: raise AnsibleConnectionFailure("'%s' timer value '%s' is invalid, value should be greater than or 
equal to zero." % (timer_name, timeout)) def transport_test(self, connect_timeout): """This method enables wait_for_connection to work. As it is used by wait_for_connection, it is called by that module's action plugin, which is on the controller process, which means that nothing done on this instance should impact the actual persistent connection... this check is for informational purposes only and should be properly cleaned up. """ # Force a fresh connect if for some reason we have connected before. self.close() self._connect() self.close() def _get_terminal_std_re(self, option): terminal_std_option = self.get_option(option) terminal_std_re = [] if terminal_std_option: for item in terminal_std_option: if "pattern" not in item: raise AnsibleConnectionFailure("'pattern' is a required key for option '%s'," " received option value is %s" % (option, item)) pattern = br"%s" % to_bytes(item['pattern']) flag = item.get('flags', 0) if flag: flag = getattr(re, flag.split('.')[1]) terminal_std_re.append(re.compile(pattern, flag)) else: # To maintain backward compatibility terminal_std_re = getattr(self._terminal, option) return terminal_std_re
closed
ansible/ansible
https://github.com/ansible/ansible
61,819
eos tests: AttributeError: 'NoneType' object has no attribute 'endswith'
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY We seem to be having an issue with eos testing and AttributeError: 'NoneType' object has no attribute 'endswith'. https://object-storage-ca-ymq-1.vexxhost.net/v1/a0b4156a37f9453eb4ec7db5422272df/ansible_79/61779/2a3bc59e39230ad945d42da08a8aad3f91dee027/third-party-check/ansible-test-network-integration-eos-python37/52a7e42/controller/ansible-debug.html#l48225 It looks like self._get_prompt() is None. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> eos_logging ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml zuul.ansible.com ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below 2019-09-04 20:16:25,014 p=zuul u=31800 | Traceback (most recent call last): File "/home/zuul/src/github.com/ansible/ansible/lib/ansible/utils/jsonrpc.py", line 45, in handle_request result = rpc_method(*args, **kwargs) File "/home/zuul/src/github.com/ansible/ansible/lib/ansible/plugins/connection/network_cli.py", line 389, in update_play_context self._terminal.on_become(passwd=auth_pass) File "/home/zuul/src/github.com/ansible/ansible/lib/ansible/plugins/terminal/eos.py", line 62, in on_become if self._get_prompt().endswith(b'#'): AttributeError: 'NoneType' object has no attribute 'endswith' ```
https://github.com/ansible/ansible/issues/61819
https://github.com/ansible/ansible/pull/61797
394a05108dc79efc8d917fa0febbfcf721e94b61
a365e77cc350ff5ce2dfaf421d7a3f54152cf187
2019-09-04T23:14:41Z
python
2019-09-05T18:48:16Z
lib/ansible/plugins/connection/network_cli.py
# (c) 2016 Red Hat Inc. # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ --- author: Ansible Networking Team connection: network_cli short_description: Use network_cli to run command on network appliances description: - This connection plugin provides a connection to remote devices over the SSH and implements a CLI shell. This connection plugin is typically used by network devices for sending and receiving CLi commands to network devices. version_added: "2.3" options: host: description: - Specifies the remote device FQDN or IP address to establish the SSH connection to. default: inventory_hostname vars: - name: ansible_host port: type: int description: - Specifies the port on the remote device that listens for connections when establishing the SSH connection. default: 22 ini: - section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port network_os: description: - Configures the device platform network operating system. This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. vars: - name: ansible_network_os remote_user: description: - The username used to authenticate to the remote device when the SSH connection is first established. If the remote_user is not specified, the connection will use the username of the logged in user. - Can be configured from the CLI via the C(--user) or C(-u) options. ini: - section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: - Configures the user password used to authenticate to the remote device when first establishing the SSH connection. 
vars: - name: ansible_password - name: ansible_ssh_pass - name: ansible_ssh_password private_key_file: description: - The private SSH key or certificate file used to authenticate to the remote device when first establishing the SSH connection. ini: - section: defaults key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file timeout: type: int description: - Sets the connection time, in seconds, for communicating with the remote device. This timeout is used as the default timeout value for commands when issuing a command to the network CLI. If the command does not return in timeout seconds, an error is generated. default: 120 become: type: boolean description: - The become option will instruct the CLI session to attempt privilege escalation on platforms that support it. Normally this means transitioning from user mode to C(enable) mode in the CLI session. If become is set to True and the remote device does not support privilege escalation or the privilege has already been elevated, then this option is silently ignored. - Can be configured from the CLI via the C(--become) or C(-b) options. default: False ini: - section: privilege_escalation key: become env: - name: ANSIBLE_BECOME vars: - name: ansible_become become_method: description: - This option allows the become method to be specified in for handling privilege escalation. Typically the become_method value is set to C(enable) but could be defined as other values. default: sudo ini: - section: privilege_escalation key: become_method env: - name: ANSIBLE_BECOME_METHOD vars: - name: ansible_become_method host_key_auto_add: type: boolean description: - By default, Ansible will prompt the user before adding SSH keys to the known hosts file. Since persistent connections such as network_cli run in background processes, the user will never be prompted. By enabling this option, unknown host keys will automatically be added to the known hosts file. 
- Be sure to fully understand the security implications of enabling this option on production systems as it could create a security vulnerability. default: False ini: - section: paramiko_connection key: host_key_auto_add env: - name: ANSIBLE_HOST_KEY_AUTO_ADD persistent_connect_timeout: type: int description: - Configures, in seconds, the amount of time to wait when trying to initially establish a persistent connection. If this value expires before the connection to the remote device is completed, the connection will fail. default: 30 ini: - section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures, in seconds, the amount of time to wait for a command to return from the remote device. If this timer is exceeded before the command returns, the connection plugin will raise an exception and close. default: 30 ini: - section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_buffer_read_timeout: type: float description: - Configures, in seconds, the amount of time to wait for the data to be read from Paramiko channel after the command prompt is matched. This timeout value ensures that command prompt matched is correct and there is no more data left to be received from remote host. default: 0.1 ini: - section: persistent_connection key: buffer_read_timeout env: - name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT vars: - name: ansible_buffer_read_timeout persistent_log_messages: type: boolean description: - This flag will enable logging the command executed and response received from target device in the ansible log file. For this option to work 'log_path' ansible configuration option is required to be set to a file path with write access. 
- Be sure to fully understand the security implications of enabling this option as it could create a security vulnerability by logging sensitive information in log file. default: False ini: - section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages terminal_stdout_re: type: list elements: dict version_added: '2.9' description: - A single regex pattern or a sequence of patterns along with optional flags to match the command prompt from the received response chunk. This option accepts C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern to match the response and the value of C(flags) is the value accepted by I(flags) argument of I(re.compile) python method to control the way regex is matched with the response, for example I('re.I'). vars: - name: ansible_terminal_stdout_re terminal_stderr_re: type: list elements: dict version_added: '2.9' description: - This option provides the regex pattern and optional flags to match the error string from the received response chunk. This option accepts C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern to match the response and the value of C(flags) is the value accepted by I(flags) argument of I(re.compile) python method to control the way regex is matched with the response, for example I('re.I'). vars: - name: ansible_terminal_stderr_re terminal_initial_prompt: type: list version_added: '2.9' description: - A single regex pattern or a sequence of patterns to evaluate the expected prompt at the time of initial login to the remote host. vars: - name: ansible_terminal_initial_prompt terminal_initial_answer: type: list version_added: '2.9' description: - The answer to reply with if the C(terminal_initial_prompt) is matched. The value can be a single answer or a list of answers for multiple terminal_initial_prompt. 
In case the login menu has multiple prompts the sequence of the prompt and excepted answer should be in same order and the value of I(terminal_prompt_checkall) should be set to I(True) if all the values in C(terminal_initial_prompt) are expected to be matched and set to I(False) if any one login prompt is to be matched. vars: - name: ansible_terminal_initial_answer terminal_initial_prompt_checkall: type: boolean version_added: '2.9' description: - By default the value is set to I(False) and any one of the prompts mentioned in C(terminal_initial_prompt) option is matched it won't check for other prompts. When set to I(True) it will check for all the prompts mentioned in C(terminal_initial_prompt) option in the given order and all the prompts should be received from remote host if not it will result in timeout. default: False vars: - name: ansible_terminal_initial_prompt_checkall terminal_inital_prompt_newline: type: boolean version_added: '2.9' description: - This boolean flag, that when set to I(True) will send newline in the response if any of values in I(terminal_initial_prompt) is matched. default: True vars: - name: ansible_terminal_initial_prompt_newline network_cli_retries: description: - Number of attempts to connect to remote host. The delay time between the retires increases after every attempt by power of 2 in seconds till either the maximum attempts are exhausted or any of the C(persistent_command_timeout) or C(persistent_connect_timeout) timers are triggered. 
default: 3 version_added: '2.9' type: integer env: - name: ANSIBLE_NETWORK_CLI_RETRIES ini: - section: persistent_connection key: network_cli_retries vars: - name: ansible_network_cli_retries """ import getpass import json import logging import re import os import signal import socket import time import traceback from io import BytesIO from ansible.errors import AnsibleConnectionFailure from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves import cPickle from ansible.module_utils.network.common.utils import to_list from ansible.module_utils._text import to_bytes, to_text from ansible.playbook.play_context import PlayContext from ansible.plugins.connection import NetworkConnectionBase, ensure_connect from ansible.plugins.loader import cliconf_loader, terminal_loader, connection_loader class AnsibleCmdRespRecv(Exception): pass class Connection(NetworkConnectionBase): ''' CLI (shell) SSH connections on Paramiko ''' transport = 'network_cli' has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) self._ssh_shell = None self._matched_prompt = None self._matched_cmd_prompt = None self._matched_pattern = None self._last_response = None self._history = list() self._command_response = None self._terminal = None self.cliconf = None self.paramiko_conn = None if self._play_context.verbosity > 3: logging.getLogger('paramiko').setLevel(logging.DEBUG) if self._network_os: self._terminal = terminal_loader.get(self._network_os, self) if not self._terminal: raise AnsibleConnectionFailure('network os %s is not supported' % self._network_os) self.cliconf = cliconf_loader.get(self._network_os, self) if self.cliconf: self.queue_message('vvvv', 'loaded cliconf plugin for network_os %s' % self._network_os) self._sub_plugin = {'type': 'cliconf', 'name': self._network_os, 'obj': self.cliconf} else: self.queue_message('vvvv', 'unable to load cliconf for network_os %s' 
% self._network_os) else: raise AnsibleConnectionFailure( 'Unable to automatically determine host network os. Please ' 'manually configure ansible_network_os value for this host' ) self.queue_message('log', 'network_os is set to %s' % self._network_os) def _get_log_channel(self): name = "p=%s u=%s | " % (os.getpid(), getpass.getuser()) name += "paramiko [%s]" % self._play_context.remote_addr return name def get_prompt(self): """Returns the current prompt from the device""" return self._matched_prompt def exec_command(self, cmd, in_data=None, sudoable=True): # this try..except block is just to handle the transition to supporting # network_cli as a toplevel connection. Once connection=local is gone, # this block can be removed as well and all calls passed directly to # the local connection if self._ssh_shell: try: cmd = json.loads(to_text(cmd, errors='surrogate_or_strict')) kwargs = {'command': to_bytes(cmd['command'], errors='surrogate_or_strict')} for key in ('prompt', 'answer', 'sendonly', 'newline', 'prompt_retry_check'): if cmd.get(key) is True or cmd.get(key) is False: kwargs[key] = cmd[key] elif cmd.get(key) is not None: kwargs[key] = to_bytes(cmd[key], errors='surrogate_or_strict') return self.send(**kwargs) except ValueError: cmd = to_bytes(cmd, errors='surrogate_or_strict') return self.send(command=cmd) else: return super(Connection, self).exec_command(cmd, in_data, sudoable) def update_play_context(self, pc_data): """Updates the play context information for the connection""" pc_data = to_bytes(pc_data) if PY3: pc_data = cPickle.loads(pc_data, encoding='bytes') else: pc_data = cPickle.loads(pc_data) play_context = PlayContext() play_context.deserialize(pc_data) self.queue_message('vvvv', 'updating play_context for connection') if self._play_context.become ^ play_context.become: if play_context.become is True: auth_pass = play_context.become_pass self._terminal.on_become(passwd=auth_pass) self.queue_message('vvvv', 'authorizing connection') else: 
self._terminal.on_unbecome() self.queue_message('vvvv', 'deauthorizing connection') self._play_context = play_context if hasattr(self, 'reset_history'): self.reset_history() if hasattr(self, 'disable_response_logging'): self.disable_response_logging() def _connect(self): ''' Connects to the remote device and starts the terminal ''' if not self.connected: self.paramiko_conn = connection_loader.get('paramiko', self._play_context, '/dev/null') self.paramiko_conn._set_log_channel(self._get_log_channel()) self.paramiko_conn.set_options(direct={'look_for_keys': not bool(self._play_context.password and not self._play_context.private_key_file)}) self.paramiko_conn.force_persistence = self.force_persistence command_timeout = self.get_option('persistent_command_timeout') max_pause = min([self.get_option('persistent_connect_timeout'), command_timeout]) retries = self.get_option('network_cli_retries') total_pause = 0 for attempt in range(retries + 1): try: ssh = self.paramiko_conn._connect() break except Exception as e: pause = 2 ** (attempt + 1) if attempt == retries or total_pause >= max_pause: raise AnsibleConnectionFailure(to_text(e, errors='surrogate_or_strict')) else: msg = (u"network_cli_retry: attempt: %d, caught exception(%s), " u"pausing for %d seconds" % (attempt + 1, to_text(e, errors='surrogate_or_strict'), pause)) self.queue_message('vv', msg) time.sleep(pause) total_pause += pause continue self.queue_message('vvvv', 'ssh connection done, setting terminal') self._connected = True self._ssh_shell = ssh.ssh.invoke_shell() self._ssh_shell.settimeout(command_timeout) self.queue_message('vvvv', 'loaded terminal plugin for network_os %s' % self._network_os) terminal_initial_prompt = self.get_option('terminal_initial_prompt') or self._terminal.terminal_initial_prompt terminal_initial_answer = self.get_option('terminal_initial_answer') or self._terminal.terminal_initial_answer newline = self.get_option('terminal_inital_prompt_newline') or 
self._terminal.terminal_inital_prompt_newline check_all = self.get_option('terminal_initial_prompt_checkall') or False self.receive(prompts=terminal_initial_prompt, answer=terminal_initial_answer, newline=newline, check_all=check_all) self.queue_message('vvvv', 'firing event: on_open_shell()') self._terminal.on_open_shell() if self._play_context.become and self._play_context.become_method == 'enable': self.queue_message('vvvv', 'firing event: on_become') auth_pass = self._play_context.become_pass self._terminal.on_become(passwd=auth_pass) self.queue_message('vvvv', 'ssh connection has completed successfully') return self def close(self): ''' Close the active connection to the device ''' # only close the connection if its connected. if self._connected: self.queue_message('debug', "closing ssh connection to device") if self._ssh_shell: self.queue_message('debug', "firing event: on_close_shell()") self._terminal.on_close_shell() self._ssh_shell.close() self._ssh_shell = None self.queue_message('debug', "cli session is now closed") self.paramiko_conn.close() self.paramiko_conn = None self.queue_message('debug', "ssh connection has been closed successfully") super(Connection, self).close() def receive(self, command=None, prompts=None, answer=None, newline=True, prompt_retry_check=False, check_all=False): ''' Handles receiving of output from command ''' self._matched_prompt = None self._matched_cmd_prompt = None recv = BytesIO() handled = False command_prompt_matched = False matched_prompt_window = window_count = 0 # set terminal regex values for command prompt and errors in response self._terminal_stderr_re = self._get_terminal_std_re('terminal_stderr_re') self._terminal_stdout_re = self._get_terminal_std_re('terminal_stdout_re') cache_socket_timeout = self._ssh_shell.gettimeout() command_timeout = self.get_option('persistent_command_timeout') self._validate_timeout_value(command_timeout, "persistent_command_timeout") if cache_socket_timeout != command_timeout: 
self._ssh_shell.settimeout(command_timeout) buffer_read_timeout = self.get_option('persistent_buffer_read_timeout') self._validate_timeout_value(buffer_read_timeout, "persistent_buffer_read_timeout") self._log_messages("command: %s" % command) while True: if command_prompt_matched: try: signal.signal(signal.SIGALRM, self._handle_buffer_read_timeout) signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout) data = self._ssh_shell.recv(256) signal.alarm(0) self._log_messages("response-%s: %s" % (window_count + 1, data)) # if data is still received on channel it indicates the prompt string # is wrongly matched in between response chunks, continue to read # remaining response. command_prompt_matched = False # restart command_timeout timer signal.signal(signal.SIGALRM, self._handle_command_timeout) signal.alarm(command_timeout) except AnsibleCmdRespRecv: # reset socket timeout to global timeout self._ssh_shell.settimeout(cache_socket_timeout) return self._command_response else: data = self._ssh_shell.recv(256) self._log_messages("response-%s: %s" % (window_count + 1, data)) # when a channel stream is closed, received data will be empty if not data: break recv.write(data) offset = recv.tell() - 256 if recv.tell() > 256 else 0 recv.seek(offset) window = self._strip(recv.read()) window_count += 1 if prompts and not handled: handled = self._handle_prompt(window, prompts, answer, newline, False, check_all) matched_prompt_window = window_count elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count: # check again even when handled, if same prompt repeats in next window # (like in the case of a wrong enable password, etc) indicates # value of answer is wrong, report this as error. 
if self._handle_prompt(window, prompts, answer, newline, prompt_retry_check, check_all): raise AnsibleConnectionFailure("For matched prompt '%s', answer is not valid" % self._matched_cmd_prompt) if self._find_prompt(window): self._last_response = recv.getvalue() resp = self._strip(self._last_response) self._command_response = self._sanitize(resp, command) if buffer_read_timeout == 0.0: # reset socket timeout to global timeout self._ssh_shell.settimeout(cache_socket_timeout) return self._command_response else: command_prompt_matched = True @ensure_connect def send(self, command, prompt=None, answer=None, newline=True, sendonly=False, prompt_retry_check=False, check_all=False): ''' Sends the command to the device in the opened shell ''' if check_all: prompt_len = len(to_list(prompt)) answer_len = len(to_list(answer)) if prompt_len != answer_len: raise AnsibleConnectionFailure("Number of prompts (%s) is not same as that of answers (%s)" % (prompt_len, answer_len)) try: cmd = b'%s\r' % command self._history.append(cmd) self._ssh_shell.sendall(cmd) self._log_messages('send command: %s' % cmd) if sendonly: return response = self.receive(command, prompt, answer, newline, prompt_retry_check, check_all) return to_text(response, errors='surrogate_or_strict') except (socket.timeout, AttributeError): self.queue_message('error', traceback.format_exc()) raise AnsibleConnectionFailure("timeout value %s seconds reached while trying to send command: %s" % (self._ssh_shell.gettimeout(), command.strip())) def _handle_buffer_read_timeout(self, signum, frame): self.queue_message('vvvv', "Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds" % self.get_option('persistent_buffer_read_timeout')) raise AnsibleCmdRespRecv() def _handle_command_timeout(self, signum, frame): msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\ % self.get_option('persistent_command_timeout') 
self.queue_message('log', msg) raise AnsibleConnectionFailure(msg) def _strip(self, data): ''' Removes ANSI codes from device response ''' for regex in self._terminal.ansi_re: data = regex.sub(b'', data) return data def _handle_prompt(self, resp, prompts, answer, newline, prompt_retry_check=False, check_all=False): ''' Matches the command prompt and responds :arg resp: Byte string containing the raw response from the remote :arg prompts: Sequence of byte strings that we consider prompts for input :arg answer: Sequence of Byte string to send back to the remote if we find a prompt. A carriage return is automatically appended to this string. :param prompt_retry_check: Bool value for trying to detect more prompts :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of given prompt. :returns: True if a prompt was found in ``resp``. If check_all is True will True only after all the prompt in the prompts list are matched. False otherwise. ''' single_prompt = False if not isinstance(prompts, list): prompts = [prompts] single_prompt = True if not isinstance(answer, list): answer = [answer] prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts] for index, regex in enumerate(prompts_regex): match = regex.search(resp) if match: self._matched_cmd_prompt = match.group() self._log_messages("matched command prompt: %s" % self._matched_cmd_prompt) # if prompt_retry_check is enabled to check if same prompt is # repeated don't send answer again. 
if not prompt_retry_check: prompt_answer = answer[index] if len(answer) > index else answer[0] self._ssh_shell.sendall(b'%s' % prompt_answer) if newline: self._ssh_shell.sendall(b'\r') prompt_answer += b'\r' self._log_messages("matched command prompt answer: %s" % prompt_answer) if check_all and prompts and not single_prompt: prompts.pop(0) answer.pop(0) return False return True return False def _sanitize(self, resp, command=None): ''' Removes elements from the response before returning to the caller ''' cleaned = [] for line in resp.splitlines(): if command and line.strip() == command.strip(): continue for prompt in self._matched_prompt.strip().splitlines(): if prompt.strip() in line: break else: cleaned.append(line) return b'\n'.join(cleaned).strip() def _find_prompt(self, response): '''Searches the buffered response for a matching command prompt ''' errored_response = None is_error_message = False for regex in self._terminal_stderr_re: if regex.search(response): is_error_message = True # Check if error response ends with command prompt if not # receive it buffered prompt for regex in self._terminal_stdout_re: match = regex.search(response) if match: errored_response = response self._matched_pattern = regex.pattern self._matched_prompt = match.group() self._log_messages("matched error regex '%s' from response '%s'" % (self._matched_pattern, errored_response)) break if not is_error_message: for regex in self._terminal_stdout_re: match = regex.search(response) if match: self._matched_pattern = regex.pattern self._matched_prompt = match.group() self._log_messages("matched cli prompt '%s' with regex '%s' from response '%s'" % (self._matched_prompt, self._matched_pattern, response)) if not errored_response: return True if errored_response: raise AnsibleConnectionFailure(errored_response) return False def _validate_timeout_value(self, timeout, timer_name): if timeout < 0: raise AnsibleConnectionFailure("'%s' timer value '%s' is invalid, value should be greater than or 
equal to zero." % (timer_name, timeout)) def transport_test(self, connect_timeout): """This method enables wait_for_connection to work. As it is used by wait_for_connection, it is called by that module's action plugin, which is on the controller process, which means that nothing done on this instance should impact the actual persistent connection... this check is for informational purposes only and should be properly cleaned up. """ # Force a fresh connect if for some reason we have connected before. self.close() self._connect() self.close() def _get_terminal_std_re(self, option): terminal_std_option = self.get_option(option) terminal_std_re = [] if terminal_std_option: for item in terminal_std_option: if "pattern" not in item: raise AnsibleConnectionFailure("'pattern' is a required key for option '%s'," " received option value is %s" % (option, item)) pattern = br"%s" % to_bytes(item['pattern']) flag = item.get('flags', 0) if flag: flag = getattr(re, flag.split('.')[1]) terminal_std_re.append(re.compile(pattern, flag)) else: # To maintain backward compatibility terminal_std_re = getattr(self._terminal, option) return terminal_std_re
closed
ansible/ansible
https://github.com/ansible/ansible
61,844
IOSXR L3 is having idempotent issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOSXR L3 is having idempotent issue, which is resulting into change as True even if there's no change. https://dashboard.zuul.ansible.com/t/ansible/build/40e9d4f8d8f04e53a06f7b07849aa304 https://dashboard.zuul.ansible.com/t/ansible/build/2447ad82cf5c475c8f5c2c4546ca18f6 https://dashboard.zuul.ansible.com/t/ansible/build/52237d4c64644a1fb18e941b5da75bb8 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> iosxr_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9 and devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ios ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> On the second run of play, the run should be idempotent ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> On the second run of play, the run is not idempotent <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61844
https://github.com/ansible/ansible/pull/61860
375eb9723aed60b121531a4b5134f50fada465ba
1425d2351397b3b4f442ba42d59f0ceb6516017e
2019-09-05T13:20:52Z
python
2019-09-05T20:39:15Z
lib/ansible/module_utils/network/iosxr/config/l3_interfaces/l3_interfaces.py
# -*- coding: utf-8 -*- # Copyright 2019 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The iosxr_l3_interfaces class It is in this file where the current configuration (as dict) is compared to the provided configuration (as dict) and the command set necessary to bring the current configuration to it's desired end-state is created """ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.network.common.cfg.base import ConfigBase from ansible.module_utils.network.common.utils import to_list from ansible.module_utils.network.iosxr.facts.facts import Facts from ansible.module_utils.network.iosxr.utils.utils import normalize_interface, dict_to_set from ansible.module_utils.network.iosxr.utils.utils import remove_command_from_config_list, add_command_to_config_list from ansible.module_utils.network.iosxr.utils.utils import filter_dict_having_none_value, remove_duplicate_interface from ansible.module_utils.network.iosxr.utils.utils import validate_n_expand_ipv4, validate_ipv6 class L3_Interfaces(ConfigBase): """ The iosxr_l3_interfaces class """ gather_subset = [ '!all', '!min', ] gather_network_resources = [ 'l3_interfaces', ] def get_l3_interfaces_facts(self): """ Get the 'facts' (the current configuration) :rtype: A dictionary :returns: The current configuration as a dictionary """ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources) l3_interfaces_facts = facts['ansible_network_resources'].get('l3_interfaces') if not l3_interfaces_facts: return [] return l3_interfaces_facts def execute_module(self): """ Execute the module :rtype: A dictionary :returns: The result from module execution """ result = {'changed': False} commands = list() warnings = list() existing_l3_interfaces_facts = self.get_l3_interfaces_facts() commands.extend(self.set_config(existing_l3_interfaces_facts)) if commands: if not 
self._module.check_mode: self._connection.edit_config(commands) result['changed'] = True result['commands'] = commands changed_l3_interfaces_facts = self.get_l3_interfaces_facts() result['before'] = existing_l3_interfaces_facts if result['changed']: result['after'] = changed_l3_interfaces_facts result['warnings'] = warnings return result def set_config(self, existing_l3_interfaces_facts): """ Collect the configuration from the args passed to the module, collect the current configuration (as a dict from facts) :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ want = self._module.params['config'] have = existing_l3_interfaces_facts resp = self.set_state(want, have) return to_list(resp) def set_state(self, want, have): """ Select the appropriate function based on the state provided :param want: the desired configuration as a dictionary :param have: the current configuration as a dictionary :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] state = self._module.params['state'] if state == 'overridden': commands = self._state_overridden(want, have, self._module) elif state == 'deleted': commands = self._state_deleted(want, have) elif state == 'merged': commands = self._state_merged(want, have, self._module) elif state == 'replaced': commands = self._state_replaced(want, have, self._module) return commands def _state_replaced(self, want, have, module): """ The command generator when state is replaced :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] for interface in want: interface['name'] = normalize_interface(interface['name']) for each in have: if each['name'] == interface['name']: break else: commands.extend(self._set_config(interface, dict(), module)) continue have_dict = filter_dict_having_none_value(interface, each) 
commands.extend(self._clear_config(dict(), have_dict)) commands.extend(self._set_config(interface, each, module)) # Remove the duplicate interface call commands = remove_duplicate_interface(commands) return commands def _state_overridden(self, want, have, module): """ The command generator when state is overridden :rtype: A list :returns: the commands necessary to migrate the current configuration to the desired configuration """ commands = [] not_in_have = set() in_have = set() for each in have: for interface in want: interface['name'] = normalize_interface(interface['name']) if each['name'] == interface['name']: in_have.add(interface['name']) break elif interface['name'] != each['name']: not_in_have.add(interface['name']) else: # We didn't find a matching desired state, which means we can # pretend we recieved an empty desired state. interface = dict(name=each['name']) kwargs = {'want': interface, 'have': each} commands.extend(self._clear_config(**kwargs)) continue have_dict = filter_dict_having_none_value(interface, each) commands.extend(self._clear_config(dict(), have_dict)) commands.extend(self._set_config(interface, each, module)) # Add the want interface that's not already configured in have interface for each in (not_in_have - in_have): for every in want: interface = 'interface {0}'.format(every['name']) if each and interface not in commands: commands.extend(self._set_config(every, {}, module)) # Remove the duplicate interface call commands = remove_duplicate_interface(commands) return commands def _state_merged(self, want, have, module): """ The command generator when state is merged :rtype: A list :returns: the commands necessary to merge the provided into the current configuration """ commands = [] for interface in want: interface['name'] = normalize_interface(interface['name']) for each in have: if each['name'] == interface['name']: break else: commands.extend(self._set_config(interface, dict(), module)) continue 
commands.extend(self._set_config(interface, each, module)) return commands def _state_deleted(self, want, have): """ The command generator when state is deleted :rtype: A list :returns: the commands necessary to remove the current configuration of the provided objects """ commands = [] if want: for interface in want: interface['name'] = normalize_interface(interface['name']) for each in have: if each['name'] == interface['name']: break elif interface['name'] in each['name']: break else: continue interface = dict(name=interface['name']) commands.extend(self._clear_config(interface, each)) else: for each in have: want = dict() commands.extend(self._clear_config(want, each)) return commands def _set_config(self, want, have, module): # Set the interface config based on the want and have config commands = [] interface = 'interface ' + want['name'] # To handle L3 IPV4 configuration if want.get("ipv4"): for each in want.get("ipv4"): if each.get('address') != 'dhcp': ip_addr_want = validate_n_expand_ipv4(module, each) each['address'] = ip_addr_want # Get the diff b/w want and have want_dict = dict_to_set(want) have_dict = dict_to_set(have) # To handle L3 IPV4 configuration if dict(want_dict).get('ipv4'): if dict(have_dict).get('ipv4'): diff_ipv4 = set(dict(want_dict).get('ipv4')) - set(dict(have_dict).get('ipv4')) else: diff_ipv4 = set(dict(want_dict).get('ipv4')) for each in diff_ipv4: ipv4_dict = dict(each) if ipv4_dict.get('address') != 'dhcp': cmd = "ipv4 address {0}".format(ipv4_dict['address']) if ipv4_dict.get("secondary"): cmd += " secondary" add_command_to_config_list(interface, cmd, commands) # To handle L3 IPV6 configuration if dict(want_dict).get('ipv6'): if dict(have_dict).get('ipv6'): diff_ipv6 = set(dict(want_dict).get('ipv6')) - set(dict(have_dict).get('ipv6')) else: diff_ipv6 = set(dict(want_dict).get('ipv6')) for each in diff_ipv6: ipv6_dict = dict(each) validate_ipv6(ipv6_dict.get('address'), module) cmd = "ipv6 address 
{0}".format(ipv6_dict.get('address')) add_command_to_config_list(interface, cmd, commands) return commands def _clear_config(self, want, have): # Delete the interface config based on the want and have config count = 0 commands = [] if want.get('name'): interface = 'interface ' + want['name'] else: interface = 'interface ' + have['name'] if have.get('ipv4') and want.get('ipv4'): for each in have.get('ipv4'): if each.get('secondary') and not (want.get('ipv4')[count].get('secondary')): cmd = 'ipv4 address {0} secondary'.format(each.get('address')) remove_command_from_config_list(interface, cmd, commands) count += 1 if have.get('ipv4') and not (want.get('ipv4')): remove_command_from_config_list(interface, 'ipv4 address', commands) if have.get('ipv6') and not (want.get('ipv6')): remove_command_from_config_list(interface, 'ipv6 address', commands) return commands
closed
ansible/ansible
https://github.com/ansible/ansible
61,844
IOSXR L3 is having idempotent issue
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> IOSXR L3 is having idempotent issue, which is resulting into change as True even if there's no change. https://dashboard.zuul.ansible.com/t/ansible/build/40e9d4f8d8f04e53a06f7b07849aa304 https://dashboard.zuul.ansible.com/t/ansible/build/2447ad82cf5c475c8f5c2c4546ca18f6 https://dashboard.zuul.ansible.com/t/ansible/build/52237d4c64644a1fb18e941b5da75bb8 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> iosxr_l3_interfaces ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9 and devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ios ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> On the second run of play, the run should be idempotent ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> On the second run of play, the run is not idempotent <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61844
https://github.com/ansible/ansible/pull/61860
375eb9723aed60b121531a4b5134f50fada465ba
1425d2351397b3b4f442ba42d59f0ceb6516017e
2019-09-05T13:20:52Z
python
2019-09-05T20:39:15Z
test/integration/targets/iosxr_l3_interfaces/tests/cli/rtt.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,616
2.10 needs more Led
##### SUMMARY The codename for 2.10 needs to be recorded in lib/ansible/release.py /cc @jimi-c ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * lib/ansible/release.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.10 / devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61616
https://github.com/ansible/ansible/pull/61873
832e03d932da8e21f265abc4b4df27924d92fb47
f58899eef7bce5a682a52d33914d644e494ff898
2019-08-30T16:58:13Z
python
2019-09-06T01:36:06Z
.github/RELEASE_NAMES.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,616
2.10 needs more Led
##### SUMMARY The codename for 2.10 needs to be recorded in lib/ansible/release.py /cc @jimi-c ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * lib/ansible/release.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.10 / devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61616
https://github.com/ansible/ansible/pull/61873
832e03d932da8e21f265abc4b4df27924d92fb47
f58899eef7bce5a682a52d33914d644e494ff898
2019-08-30T16:58:13Z
python
2019-09-06T01:36:06Z
docs/docsite/rst/dev_guide/testing/sanity/release-names.rst
closed
ansible/ansible
https://github.com/ansible/ansible
61,616
2.10 needs more Led
##### SUMMARY The codename for 2.10 needs to be recorded in lib/ansible/release.py /cc @jimi-c ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * lib/ansible/release.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.10 / devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61616
https://github.com/ansible/ansible/pull/61873
832e03d932da8e21f265abc4b4df27924d92fb47
f58899eef7bce5a682a52d33914d644e494ff898
2019-08-30T16:58:13Z
python
2019-09-06T01:36:06Z
lib/ansible/release.py
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type __version__ = '2.10.0.dev0' __author__ = 'Ansible, Inc.' __codename__ = 'Immigrant Song'
closed
ansible/ansible
https://github.com/ansible/ansible
61,616
2.10 needs more Led
##### SUMMARY The codename for 2.10 needs to be recorded in lib/ansible/release.py /cc @jimi-c ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * lib/ansible/release.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.10 / devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61616
https://github.com/ansible/ansible/pull/61873
832e03d932da8e21f265abc4b4df27924d92fb47
f58899eef7bce5a682a52d33914d644e494ff898
2019-08-30T16:58:13Z
python
2019-09-06T01:36:06Z
test/sanity/code-smell/release-names.json
closed
ansible/ansible
https://github.com/ansible/ansible
61,616
2.10 needs more Led
##### SUMMARY The codename for 2.10 needs to be recorded in lib/ansible/release.py /cc @jimi-c ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * lib/ansible/release.py ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.10 / devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61616
https://github.com/ansible/ansible/pull/61873
832e03d932da8e21f265abc4b4df27924d92fb47
f58899eef7bce5a682a52d33914d644e494ff898
2019-08-30T16:58:13Z
python
2019-09-06T01:36:06Z
test/sanity/code-smell/release-names.py
closed
ansible/ansible
https://github.com/ansible/ansible
59,988
Collection does not work when namespace is ansible
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Modules that I create inside collections that exist in the `ansible` namespace do not work. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy collections ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.9.0.dev0 config file = None configured module search path = ['/home/meyers/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/meyers/ansible/virtualenv/ansible-dev3/local/lib/python3.7/site-packages/ansible executable location = /home/meyers/ansible/virtualenv/ansible-dev3/bin/ansible python version = 3.7.3 (default, Apr 3 2019, 05:39:12) [GCC 8.3.0] ``` ##### CONFIGURATION blank ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> * Create two adjacent collections `a.foo` and `ansible.foo` w/ a single module, `my_test.py`. 
* Call the module via `ansible.foo.my_test` and `a.foo.my_test` <!--- Paste example playbooks or commands between quotes below --> `collections/ansible_collections/ansible/foo/plugins/modules/my_test.py` ```yaml # my_test.py #!/usr/bin/python # Copyright: (c) 2018, Terry Jones <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: my_test short_description: This is my test module version_added: "2.4" description: - "This is my longer description explaining my test module" options: name: description: - This is the message to send to the test module required: true new: description: - Control to demo if the result of this module is changed or not required: false extends_documentation_fragment: - azure author: - Your Name (@yourhandle) ''' EXAMPLES = ''' # Pass in a message - name: Test with a message my_test: name: hello world # pass in a message and have changed true - name: Test with a message and changed output my_test: name: hello world new: true # fail the module - name: Test failure of the module my_test: name: fail me ''' RETURN = ''' original_message: description: The original name param that was passed in type: str returned: always message: description: The output message that the test module generates type: str returned: always ''' from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( name=dict(type='str', required=True), new=dict(type='bool', required=False, default=False) ) # seed the result dict in the object # we primarily care about changed and state # change is if this module effectively modified the target # state will include any data that you want your module to pass back # for consumption, for example, in a subsequent task result = dict( changed=False, 
original_message='', message='' ) # the AnsibleModule object will be our abstraction working with Ansible # this includes instantiation, a couple of common attr would be the # args/params passed to the execution, as well as if the module # supports check mode module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) # manipulate or modify the state as needed (this is going to be the # part where your module will do what it needs to do) result['original_message'] = module.params['name'] result['message'] = 'goodbye' # use whatever logic you need to determine whether or not this module # made any modifications to your target if module.params['new']: result['changed'] = True # during the execution of the module, if there is an exception or a # conditional state that effectively causes a failure, run # AnsibleModule.fail_json() to pass in the message and the result if module.params['name'] == 'fail me': module.fail_json(msg='You requested this to fail', **result) # in the event of a successful module execution, you will want to # simple AnsibleModule.exit_json(), passing the key/value results module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main() ``` ```yaml # main.yml - hosts: localhost gather_facts: false tasks: - ansible.foo.my_test: name: "hello world" - a.foo.my_test: name: "hello world" ``` Copy the ansible subdir into a. ``` . 
├── collections │   └── ansible_collections │   ├── a │   │   └── foo │   │   └── plugins │   │   └── modules │   │   └── my_test.py │   └── ansible │   └── foo │   └── plugins │   └── modules │   └── my_test.py └── main.yml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS * For both `ansible.foo.my_test` and `a.foo.my_test` to succeed. ##### ACTUAL RESULTS * `ansible.foo.my_test` fails because the module can not be found (see below) <!--- Paste verbatim command output between quotes --> ``` ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. The error appears to be in '/home/meyers/ansible/ansible-examples/collection_ansible_namespace/main.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - ansible.foo.my_test: ```
https://github.com/ansible/ansible/issues/59988
https://github.com/ansible/ansible/pull/61908
f58899eef7bce5a682a52d33914d644e494ff898
7f4328ad1261295d43f58f3ebc42444ac03cc5ea
2019-08-02T15:40:51Z
python
2019-09-06T01:50:22Z
changelogs/fragments/allow_ansible_ns.yml
closed
ansible/ansible
https://github.com/ansible/ansible
59,988
Collection does not work when namespace is ansible
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Modules that I create inside collections that exist in the `ansible` namespace do not work. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy collections ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.9.0.dev0 config file = None configured module search path = ['/home/meyers/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/meyers/ansible/virtualenv/ansible-dev3/local/lib/python3.7/site-packages/ansible executable location = /home/meyers/ansible/virtualenv/ansible-dev3/bin/ansible python version = 3.7.3 (default, Apr 3 2019, 05:39:12) [GCC 8.3.0] ``` ##### CONFIGURATION blank ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> * Create two adjacent collections `a.foo` and `ansible.foo` w/ a single module, `my_test.py`. 
* Call the module via `ansible.foo.my_test` and `a.foo.my_test` <!--- Paste example playbooks or commands between quotes below --> `collections/ansible_collections/ansible/foo/plugins/modules/my_test.py` ```yaml # my_test.py #!/usr/bin/python # Copyright: (c) 2018, Terry Jones <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: my_test short_description: This is my test module version_added: "2.4" description: - "This is my longer description explaining my test module" options: name: description: - This is the message to send to the test module required: true new: description: - Control to demo if the result of this module is changed or not required: false extends_documentation_fragment: - azure author: - Your Name (@yourhandle) ''' EXAMPLES = ''' # Pass in a message - name: Test with a message my_test: name: hello world # pass in a message and have changed true - name: Test with a message and changed output my_test: name: hello world new: true # fail the module - name: Test failure of the module my_test: name: fail me ''' RETURN = ''' original_message: description: The original name param that was passed in type: str returned: always message: description: The output message that the test module generates type: str returned: always ''' from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( name=dict(type='str', required=True), new=dict(type='bool', required=False, default=False) ) # seed the result dict in the object # we primarily care about changed and state # change is if this module effectively modified the target # state will include any data that you want your module to pass back # for consumption, for example, in a subsequent task result = dict( changed=False, 
original_message='', message='' ) # the AnsibleModule object will be our abstraction working with Ansible # this includes instantiation, a couple of common attr would be the # args/params passed to the execution, as well as if the module # supports check mode module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) # manipulate or modify the state as needed (this is going to be the # part where your module will do what it needs to do) result['original_message'] = module.params['name'] result['message'] = 'goodbye' # use whatever logic you need to determine whether or not this module # made any modifications to your target if module.params['new']: result['changed'] = True # during the execution of the module, if there is an exception or a # conditional state that effectively causes a failure, run # AnsibleModule.fail_json() to pass in the message and the result if module.params['name'] == 'fail me': module.fail_json(msg='You requested this to fail', **result) # in the event of a successful module execution, you will want to # simple AnsibleModule.exit_json(), passing the key/value results module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main() ``` ```yaml # main.yml - hosts: localhost gather_facts: false tasks: - ansible.foo.my_test: name: "hello world" - a.foo.my_test: name: "hello world" ``` Copy the ansible subdir into a. ``` . 
├── collections │   └── ansible_collections │   ├── a │   │   └── foo │   │   └── plugins │   │   └── modules │   │   └── my_test.py │   └── ansible │   └── foo │   └── plugins │   └── modules │   └── my_test.py └── main.yml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS * For both `ansible.foo.my_test` and `a.foo.my_test` to succeed. ##### ACTUAL RESULTS * `ansible.foo.my_test` fails because the module can not be found (see below) <!--- Paste verbatim command output between quotes --> ``` ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. The error appears to be in '/home/meyers/ansible/ansible-examples/collection_ansible_namespace/main.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - ansible.foo.my_test: ```
https://github.com/ansible/ansible/issues/59988
https://github.com/ansible/ansible/pull/61908
f58899eef7bce5a682a52d33914d644e494ff898
7f4328ad1261295d43f58f3ebc42444ac03cc5ea
2019-08-02T15:40:51Z
python
2019-09-06T01:50:22Z
lib/ansible/utils/collection_loader.py
# (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import os.path import pkgutil import re import sys from types import ModuleType from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.six import iteritems, string_types, with_metaclass from ansible.utils.singleton import Singleton # HACK: keep Python 2.6 controller tests happy in CI until they're properly split try: from importlib import import_module except ImportError: import_module = __import__ _SYNTHETIC_PACKAGES = { 'ansible_collections.ansible': dict(type='pkg_only'), 'ansible_collections.ansible.builtin': dict(type='pkg_only'), 'ansible_collections.ansible.builtin.plugins': dict(type='map', map='ansible.plugins'), 'ansible_collections.ansible.builtin.plugins.module_utils': dict(type='map', map='ansible.module_utils', graft=True), 'ansible_collections.ansible.builtin.plugins.modules': dict(type='flatmap', flatmap='ansible.modules', graft=True), } # FIXME: exception handling/error logging class AnsibleCollectionLoader(with_metaclass(Singleton, object)): def __init__(self, config=None): if config: self._n_configured_paths = config.get_config_value('COLLECTIONS_PATHS') else: self._n_configured_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATHS', '').split(os.pathsep) if isinstance(self._n_configured_paths, string_types): self._n_configured_paths = [self._n_configured_paths] elif self._n_configured_paths is None: self._n_configured_paths = [] # expand any placeholders in configured paths self._n_configured_paths = [to_native(os.path.expanduser(p), errors='surrogate_or_strict') for p in self._n_configured_paths] self._n_playbook_paths = [] self._default_collection = None # pre-inject grafted package maps so we can force them to use the right loader instead of potentially delegating to a "normal" loader for 
syn_pkg_def in (p for p in iteritems(_SYNTHETIC_PACKAGES) if p[1].get('graft')): pkg_name = syn_pkg_def[0] pkg_def = syn_pkg_def[1] newmod = ModuleType(pkg_name) newmod.__package__ = pkg_name newmod.__file__ = '<ansible_synthetic_collection_package>' pkg_type = pkg_def.get('type') # TODO: need to rethink map style so we can just delegate all the loading if pkg_type == 'flatmap': newmod.__loader__ = AnsibleFlatMapLoader(import_module(pkg_def['flatmap'])) newmod.__path__ = [] sys.modules[pkg_name] = newmod @property def n_collection_paths(self): return self._n_playbook_paths + self._n_configured_paths def get_collection_path(self, collection_name): if not AnsibleCollectionRef.is_valid_collection_name(collection_name): raise ValueError('{0} is not a valid collection name'.format(to_native(collection_name))) m = import_module('ansible_collections.{0}'.format(collection_name)) return m.__file__ def set_playbook_paths(self, b_playbook_paths): if isinstance(b_playbook_paths, string_types): b_playbook_paths = [b_playbook_paths] # track visited paths; we have to preserve the dir order as-passed in case there are duplicate collections (first one wins) added_paths = set() # de-dupe and ensure the paths are native strings (Python seems to do this for package paths etc, so assume it's safe) self._n_playbook_paths = [os.path.join(to_native(p), 'collections') for p in b_playbook_paths if not (p in added_paths or added_paths.add(p))] # FIXME: only allow setting this once, or handle any necessary cache/package path invalidations internally? # FIXME: is there a better place to store this? 
# FIXME: only allow setting this once def set_default_collection(self, collection_name): self._default_collection = collection_name @property def default_collection(self): return self._default_collection def find_module(self, fullname, path=None): # this loader is only concerned with items under the Ansible Collections namespace hierarchy, ignore others if fullname.startswith('ansible_collections.') or fullname == 'ansible_collections': return self return None def load_module(self, fullname): if sys.modules.get(fullname): return sys.modules[fullname] # this loader implements key functionality for Ansible collections # * implicit distributed namespace packages for the root Ansible namespace (no pkgutil.extend_path hackery reqd) # * implicit package support for Python 2.7 (no need for __init__.py in collections, except to use standard Py2.7 tooling) # * preventing controller-side code injection during collection loading # * (default loader would execute arbitrary package code from all __init__.py's) parent_pkg_name = '.'.join(fullname.split('.')[:-1]) parent_pkg = sys.modules.get(parent_pkg_name) if parent_pkg_name and not parent_pkg: raise ImportError('parent package {0} not found'.format(parent_pkg_name)) # are we at or below the collection level? eg a.mynamespace.mycollection.something.else # if so, we don't want distributed namespace behavior; first mynamespace.mycollection on the path is where # we'll load everything from (ie, don't fall back to another mynamespace.mycollection lower on the path) sub_collection = fullname.count('.') > 1 synpkg_def = _SYNTHETIC_PACKAGES.get(fullname) synpkg_remainder = '' if not synpkg_def: synpkg_def = _SYNTHETIC_PACKAGES.get(parent_pkg_name) synpkg_remainder = '.' + fullname.rpartition('.')[2] # FIXME: collapse as much of this back to on-demand as possible (maybe stub packages that get replaced when actually loaded?) 
if synpkg_def: pkg_type = synpkg_def.get('type') if not pkg_type: raise KeyError('invalid synthetic package type (no package "type" specified)') if pkg_type == 'map': map_package = synpkg_def.get('map') if not map_package: raise KeyError('invalid synthetic map package definition (no target "map" defined)') mod = import_module(map_package + synpkg_remainder) sys.modules[fullname] = mod return mod elif pkg_type == 'flatmap': raise NotImplementedError() elif pkg_type == 'pkg_only': newmod = ModuleType(fullname) newmod.__package__ = fullname newmod.__file__ = '<ansible_synthetic_collection_package>' newmod.__loader__ = self newmod.__path__ = [] sys.modules[fullname] = newmod return newmod if not parent_pkg: # top-level package, look for NS subpackages on all collection paths package_paths = [self._extend_path_with_ns(p, fullname) for p in self.n_collection_paths] else: # subpackage; search in all subpaths (we'll limit later inside a collection) package_paths = [self._extend_path_with_ns(p, fullname) for p in parent_pkg.__path__] for candidate_child_path in package_paths: code_object = None is_package = True location = None # check for implicit sub-package first if os.path.isdir(to_bytes(candidate_child_path)): # Py3.x implicit namespace packages don't have a file location, so they don't support get_data # (which assumes the parent dir or that the loader has an internal mapping); so we have to provide # a bogus leaf file on the __file__ attribute for pkgutil.get_data to strip off location = os.path.join(candidate_child_path, '__synthetic__') else: for source_path in [os.path.join(candidate_child_path, '__init__.py'), candidate_child_path + '.py']: if not os.path.isfile(to_bytes(source_path)): continue with open(to_bytes(source_path), 'rb') as fd: source = fd.read() code_object = compile(source=source, filename=source_path, mode='exec', flags=0, dont_inherit=True) location = source_path is_package = source_path.endswith('__init__.py') break if not location: continue 
newmod = ModuleType(fullname) newmod.__file__ = location newmod.__loader__ = self if is_package: if sub_collection: # we never want to search multiple instances of the same collection; use first found newmod.__path__ = [candidate_child_path] else: newmod.__path__ = package_paths newmod.__package__ = fullname else: newmod.__package__ = parent_pkg_name sys.modules[fullname] = newmod if code_object: # FIXME: decide cases where we don't actually want to exec the code? exec(code_object, newmod.__dict__) return newmod # FIXME: need to handle the "no dirs present" case for at least the root and synthetic internal collections like ansible.builtin raise ImportError('module {0} not found'.format(fullname)) @staticmethod def _extend_path_with_ns(path, ns): ns_path_add = ns.rsplit('.', 1)[-1] return os.path.join(path, ns_path_add) def get_data(self, filename): with open(filename, 'rb') as fd: return fd.read() class AnsibleFlatMapLoader(object): _extension_blacklist = ['.pyc', '.pyo'] def __init__(self, root_package): self._root_package = root_package self._dirtree = None def _init_dirtree(self): # FIXME: thread safety root_path = os.path.dirname(self._root_package.__file__) flat_files = [] # FIXME: make this a dict of filename->dir for faster direct lookup? # FIXME: deal with _ prefixed deprecated files (or require another method for collections?) # FIXME: fix overloaded filenames (eg, rename Windows setup to win_setup) for root, dirs, files in os.walk(root_path): # add all files in this dir that don't have a blacklisted extension flat_files.extend(((root, f) for f in files if not any((f.endswith(ext) for ext in self._extension_blacklist)))) self._dirtree = flat_files def find_file(self, filename): # FIXME: thread safety if not self._dirtree: self._init_dirtree() if '.' not in filename: # no extension specified, use extension regex to filter extensionless_re = re.compile(r'^{0}(\..+)?$'.format(re.escape(filename))) # why doesn't Python have first()? 
try: # FIXME: store extensionless in a separate direct lookup? filepath = next(os.path.join(r, f) for r, f in self._dirtree if extensionless_re.match(f)) except StopIteration: raise IOError("couldn't find {0}".format(filename)) else: # actual filename, just look it up # FIXME: this case sucks; make it a lookup try: filepath = next(os.path.join(r, f) for r, f in self._dirtree if f == filename) except StopIteration: raise IOError("couldn't find {0}".format(filename)) return filepath def get_data(self, filename): found_file = self.find_file(filename) with open(found_file, 'rb') as fd: return fd.read() # TODO: implement these for easier inline debugging? # def get_source(self, fullname): # def get_code(self, fullname): # def is_package(self, fullname): class AnsibleCollectionRef: # FUTURE: introspect plugin loaders to get these dynamically? VALID_REF_TYPES = frozenset(to_text(r) for r in ['action', 'become', 'cache', 'callback', 'cliconf', 'connection', 'doc_fragments', 'filter', 'httpapi', 'inventory', 'lookup', 'module_utils', 'modules', 'netconf', 'role', 'shell', 'strategy', 'terminal', 'test', 'vars']) # FIXME: tighten this up to match Python identifier reqs, etc VALID_COLLECTION_NAME_RE = re.compile(to_text(r'^(\w+)\.(\w+)$')) VALID_SUBDIRS_RE = re.compile(to_text(r'^\w+(\.\w+)*$')) VALID_FQCR_RE = re.compile(to_text(r'^\w+\.\w+\.\w+(\.\w+)*$')) # can have 0-N included subdirs as well def __init__(self, collection_name, subdirs, resource, ref_type): """ Create an AnsibleCollectionRef from components :param collection_name: a collection name of the form 'namespace.collectionname' :param subdirs: optional subdir segments to be appended below the plugin type (eg, 'subdir1.subdir2') :param resource: the name of the resource being references (eg, 'mymodule', 'someaction', 'a_role') :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment' """ collection_name = to_text(collection_name, errors='strict') if subdirs is not None: subdirs = 
to_text(subdirs, errors='strict') resource = to_text(resource, errors='strict') ref_type = to_text(ref_type, errors='strict') if not self.is_valid_collection_name(collection_name): raise ValueError('invalid collection name (must be of the form namespace.collection): {0}'.format(to_native(collection_name))) if ref_type not in self.VALID_REF_TYPES: raise ValueError('invalid collection ref_type: {0}'.format(ref_type)) self.collection = collection_name if subdirs: if not re.match(self.VALID_SUBDIRS_RE, subdirs): raise ValueError('invalid subdirs entry: {0} (must be empty/None or of the form subdir1.subdir2)'.format(to_native(subdirs))) self.subdirs = subdirs else: self.subdirs = u'' self.resource = resource self.ref_type = ref_type package_components = [u'ansible_collections', self.collection] if self.ref_type == u'role': package_components.append(u'roles') else: # we assume it's a plugin package_components += [u'plugins', self.ref_type] if self.subdirs: package_components.append(self.subdirs) if self.ref_type == u'role': # roles are their own resource package_components.append(self.resource) self.n_python_package_name = to_native('.'.join(package_components)) @staticmethod def from_fqcr(ref, ref_type): """ Parse a string as a fully-qualified collection reference, raises ValueError if invalid :param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource') :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment' :return: a populated AnsibleCollectionRef object """ # assuming the fq_name is of the form (ns).(coll).(optional_subdir_N).(resource_name), # we split the resource name off the right, split ns and coll off the left, and we're left with any optional # subdirs that need to be added back below the plugin-specific subdir we'll add. 
So: # ns.coll.resource -> ansible_collections.ns.coll.plugins.(plugintype).resource # ns.coll.subdir1.resource -> ansible_collections.ns.coll.plugins.subdir1.(plugintype).resource # ns.coll.rolename -> ansible_collections.ns.coll.roles.rolename if not AnsibleCollectionRef.is_valid_fqcr(ref): raise ValueError('{0} is not a valid collection reference'.format(to_native(ref))) ref = to_text(ref, errors='strict') ref_type = to_text(ref_type, errors='strict') resource_splitname = ref.rsplit(u'.', 1) package_remnant = resource_splitname[0] resource = resource_splitname[1] # split the left two components of the collection package name off, anything remaining is plugin-type # specific subdirs to be added back on below the plugin type package_splitname = package_remnant.split(u'.', 2) if len(package_splitname) == 3: subdirs = package_splitname[2] else: subdirs = u'' collection_name = u'.'.join(package_splitname[0:2]) return AnsibleCollectionRef(collection_name, subdirs, resource, ref_type) @staticmethod def try_parse_fqcr(ref, ref_type): """ Attempt to parse a string as a fully-qualified collection reference, returning None on failure (instead of raising an error) :param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource') :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment' :return: a populated AnsibleCollectionRef object on successful parsing, else None """ try: return AnsibleCollectionRef.from_fqcr(ref, ref_type) except ValueError: pass @staticmethod def legacy_plugin_dir_to_plugin_type(legacy_plugin_dir_name): """ Utility method to convert from a PluginLoader dir name to a plugin ref_type :param legacy_plugin_dir_name: PluginLoader dir name (eg, 'action_plugins', 'library') :return: the corresponding plugin ref_type (eg, 'action', 'role') """ legacy_plugin_dir_name = to_text(legacy_plugin_dir_name) plugin_type = legacy_plugin_dir_name.replace(u'_plugins', u'') if plugin_type == 
u'library': plugin_type = u'modules' if plugin_type not in AnsibleCollectionRef.VALID_REF_TYPES: raise ValueError('{0} cannot be mapped to a valid collection ref type'.format(to_native(legacy_plugin_dir_name))) return plugin_type @staticmethod def is_valid_fqcr(ref, ref_type=None): """ Validates if is string is a well-formed fully-qualified collection reference (does not look up the collection itself) :param ref: candidate collection reference to validate (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource') :param ref_type: optional reference type to enable deeper validation, eg 'module', 'role', 'doc_fragment' :return: True if the collection ref passed is well-formed, False otherwise """ ref = to_text(ref) if not ref_type: return bool(re.match(AnsibleCollectionRef.VALID_FQCR_RE, ref)) return bool(AnsibleCollectionRef.try_parse_fqcr(ref, ref_type)) @staticmethod def is_valid_collection_name(collection_name): """ Validates if is string is a well-formed collection name (does not look up the collection itself) :param collection_name: candidate collection name to validate (a valid name is of the form 'ns.collname') :return: True if the collection name passed is well-formed, False otherwise """ collection_name = to_text(collection_name) return bool(re.match(AnsibleCollectionRef.VALID_COLLECTION_NAME_RE, collection_name)) def get_collection_role_path(role_name, collection_list=None): acr = AnsibleCollectionRef.try_parse_fqcr(role_name, 'role') if acr: # looks like a valid qualified collection ref; skip the collection_list role = acr.resource collection_list = [acr.collection] subdirs = acr.subdirs resource = acr.resource elif not collection_list: return None # not a FQ role and no collection search list spec'd, nothing to do else: resource = role_name # treat as unqualified, loop through the collection search list to try and resolve subdirs = '' for collection_name in collection_list: try: acr = 
AnsibleCollectionRef(collection_name=collection_name, subdirs=subdirs, resource=resource, ref_type='role') # FIXME: error handling/logging; need to catch any import failures and move along # FIXME: this line shouldn't be necessary, but py2 pkgutil.get_data is delegating back to built-in loader when it shouldn't pkg = import_module(acr.n_python_package_name) if pkg is not None: # the package is now loaded, get the collection's package and ask where it lives path = os.path.dirname(to_bytes(sys.modules[acr.n_python_package_name].__file__, errors='surrogate_or_strict')) return resource, to_text(path, errors='surrogate_or_strict'), collection_name except IOError: continue except Exception as ex: # FIXME: pick out typical import errors first, then error logging continue return None _N_COLLECTION_PATH_RE = re.compile(r'/ansible_collections/([^/]+)/([^/]+)') def get_collection_name_from_path(path): """ Return the containing collection name for a given path, or None if the path is not below a configured collection, or the collection cannot be loaded (eg, the collection is masked by another of the same name higher in the configured collection roots). 
:param n_path: native-string path to evaluate for collection containment :return: collection name or None """ n_collection_paths = [to_native(os.path.realpath(to_bytes(p))) for p in AnsibleCollectionLoader().n_collection_paths] b_path = os.path.realpath(to_bytes(path)) n_path = to_native(b_path) for coll_path in n_collection_paths: common_prefix = to_native(os.path.commonprefix([b_path, to_bytes(coll_path)])) if common_prefix == coll_path: # strip off the common prefix (handle weird testing cases of nested collection roots, eg) collection_remnant = n_path[len(coll_path):] # commonprefix may include the trailing /, prepend to the remnant if necessary (eg trailing / on root) if collection_remnant[0] != '/': collection_remnant = '/' + collection_remnant # the path lives under this collection root, see if it maps to a collection found_collection = _N_COLLECTION_PATH_RE.search(collection_remnant) if not found_collection: continue n_collection_name = '{0}.{1}'.format(*found_collection.groups()) loaded_collection_path = AnsibleCollectionLoader().get_collection_path(n_collection_name) if not loaded_collection_path: return None # ensure we're using the canonical real path, with the bogus __synthetic__ stripped off b_loaded_collection_path = os.path.dirname(os.path.realpath(to_bytes(loaded_collection_path))) # if the collection path prefix matches the path prefix we were passed, it's the same collection that's loaded if os.path.commonprefix([b_path, b_loaded_collection_path]) == b_loaded_collection_path: return n_collection_name return None # if not, it's a collection, but not the same collection the loader sees, so ignore it def set_collection_playbook_paths(b_playbook_paths): AnsibleCollectionLoader().set_playbook_paths(b_playbook_paths)
closed
ansible/ansible
https://github.com/ansible/ansible
59,988
Collection does not work when namespace is ansible
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Modules that I create inside collections that exist in the `ansible` namespace do not work. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy collections ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.9.0.dev0 config file = None configured module search path = ['/home/meyers/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/meyers/ansible/virtualenv/ansible-dev3/local/lib/python3.7/site-packages/ansible executable location = /home/meyers/ansible/virtualenv/ansible-dev3/bin/ansible python version = 3.7.3 (default, Apr 3 2019, 05:39:12) [GCC 8.3.0] ``` ##### CONFIGURATION blank ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> * Create two adjacent collections `a.foo` and `ansible.foo` w/ a single module, `my_test.py`. 
* Call the module via `ansible.foo.my_test` and `a.foo.my_test` <!--- Paste example playbooks or commands between quotes below --> `collections/ansible_collections/ansible/foo/plugins/modules/my_test.py` ```yaml # my_test.py #!/usr/bin/python # Copyright: (c) 2018, Terry Jones <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: my_test short_description: This is my test module version_added: "2.4" description: - "This is my longer description explaining my test module" options: name: description: - This is the message to send to the test module required: true new: description: - Control to demo if the result of this module is changed or not required: false extends_documentation_fragment: - azure author: - Your Name (@yourhandle) ''' EXAMPLES = ''' # Pass in a message - name: Test with a message my_test: name: hello world # pass in a message and have changed true - name: Test with a message and changed output my_test: name: hello world new: true # fail the module - name: Test failure of the module my_test: name: fail me ''' RETURN = ''' original_message: description: The original name param that was passed in type: str returned: always message: description: The output message that the test module generates type: str returned: always ''' from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( name=dict(type='str', required=True), new=dict(type='bool', required=False, default=False) ) # seed the result dict in the object # we primarily care about changed and state # change is if this module effectively modified the target # state will include any data that you want your module to pass back # for consumption, for example, in a subsequent task result = dict( changed=False, 
original_message='', message='' ) # the AnsibleModule object will be our abstraction working with Ansible # this includes instantiation, a couple of common attr would be the # args/params passed to the execution, as well as if the module # supports check mode module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) # manipulate or modify the state as needed (this is going to be the # part where your module will do what it needs to do) result['original_message'] = module.params['name'] result['message'] = 'goodbye' # use whatever logic you need to determine whether or not this module # made any modifications to your target if module.params['new']: result['changed'] = True # during the execution of the module, if there is an exception or a # conditional state that effectively causes a failure, run # AnsibleModule.fail_json() to pass in the message and the result if module.params['name'] == 'fail me': module.fail_json(msg='You requested this to fail', **result) # in the event of a successful module execution, you will want to # simple AnsibleModule.exit_json(), passing the key/value results module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main() ``` ```yaml # main.yml - hosts: localhost gather_facts: false tasks: - ansible.foo.my_test: name: "hello world" - a.foo.my_test: name: "hello world" ``` Copy the ansible subdir into a. ``` . 
├── collections │   └── ansible_collections │   ├── a │   │   └── foo │   │   └── plugins │   │   └── modules │   │   └── my_test.py │   └── ansible │   └── foo │   └── plugins │   └── modules │   └── my_test.py └── main.yml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS * For both `ansible.foo.my_test` and `a.foo.my_test` to succeed. ##### ACTUAL RESULTS * `ansible.foo.my_test` fails because the module can not be found (see below) <!--- Paste verbatim command output between quotes --> ``` ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. The error appears to be in '/home/meyers/ansible/ansible-examples/collection_ansible_namespace/main.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - ansible.foo.my_test: ```
https://github.com/ansible/ansible/issues/59988
https://github.com/ansible/ansible/pull/61908
f58899eef7bce5a682a52d33914d644e494ff898
7f4328ad1261295d43f58f3ebc42444ac03cc5ea
2019-08-02T15:40:51Z
python
2019-09-06T01:50:22Z
test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py
closed
ansible/ansible
https://github.com/ansible/ansible
59,988
Collection does not work when namespace is ansible
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Modules that I create inside collections that exist in the `ansible` namespace do not work. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy collections ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.9.0.dev0 config file = None configured module search path = ['/home/meyers/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/meyers/ansible/virtualenv/ansible-dev3/local/lib/python3.7/site-packages/ansible executable location = /home/meyers/ansible/virtualenv/ansible-dev3/bin/ansible python version = 3.7.3 (default, Apr 3 2019, 05:39:12) [GCC 8.3.0] ``` ##### CONFIGURATION blank ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> * Create two adjacent collections `a.foo` and `ansible.foo` w/ a single module, `my_test.py`. 
* Call the module via `ansible.foo.my_test` and `a.foo.my_test` <!--- Paste example playbooks or commands between quotes below --> `collections/ansible_collections/ansible/foo/plugins/modules/my_test.py` ```yaml # my_test.py #!/usr/bin/python # Copyright: (c) 2018, Terry Jones <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: my_test short_description: This is my test module version_added: "2.4" description: - "This is my longer description explaining my test module" options: name: description: - This is the message to send to the test module required: true new: description: - Control to demo if the result of this module is changed or not required: false extends_documentation_fragment: - azure author: - Your Name (@yourhandle) ''' EXAMPLES = ''' # Pass in a message - name: Test with a message my_test: name: hello world # pass in a message and have changed true - name: Test with a message and changed output my_test: name: hello world new: true # fail the module - name: Test failure of the module my_test: name: fail me ''' RETURN = ''' original_message: description: The original name param that was passed in type: str returned: always message: description: The output message that the test module generates type: str returned: always ''' from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( name=dict(type='str', required=True), new=dict(type='bool', required=False, default=False) ) # seed the result dict in the object # we primarily care about changed and state # change is if this module effectively modified the target # state will include any data that you want your module to pass back # for consumption, for example, in a subsequent task result = dict( changed=False, 
original_message='', message='' ) # the AnsibleModule object will be our abstraction working with Ansible # this includes instantiation, a couple of common attr would be the # args/params passed to the execution, as well as if the module # supports check mode module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) # manipulate or modify the state as needed (this is going to be the # part where your module will do what it needs to do) result['original_message'] = module.params['name'] result['message'] = 'goodbye' # use whatever logic you need to determine whether or not this module # made any modifications to your target if module.params['new']: result['changed'] = True # during the execution of the module, if there is an exception or a # conditional state that effectively causes a failure, run # AnsibleModule.fail_json() to pass in the message and the result if module.params['name'] == 'fail me': module.fail_json(msg='You requested this to fail', **result) # in the event of a successful module execution, you will want to # simple AnsibleModule.exit_json(), passing the key/value results module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main() ``` ```yaml # main.yml - hosts: localhost gather_facts: false tasks: - ansible.foo.my_test: name: "hello world" - a.foo.my_test: name: "hello world" ``` Copy the ansible subdir into a. ``` . 
├── collections │   └── ansible_collections │   ├── a │   │   └── foo │   │   └── plugins │   │   └── modules │   │   └── my_test.py │   └── ansible │   └── foo │   └── plugins │   └── modules │   └── my_test.py └── main.yml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS * For both `ansible.foo.my_test` and `a.foo.my_test` to succeed. ##### ACTUAL RESULTS * `ansible.foo.my_test` fails because the module can not be found (see below) <!--- Paste verbatim command output between quotes --> ``` ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. The error appears to be in '/home/meyers/ansible/ansible-examples/collection_ansible_namespace/main.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - ansible.foo.my_test: ```
https://github.com/ansible/ansible/issues/59988
https://github.com/ansible/ansible/pull/61908
f58899eef7bce5a682a52d33914d644e494ff898
7f4328ad1261295d43f58f3ebc42444ac03cc5ea
2019-08-02T15:40:51Z
python
2019-09-06T01:50:22Z
test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py
closed
ansible/ansible
https://github.com/ansible/ansible
59,988
Collection does not work when namespace is ansible
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Modules that I create inside collections that exist in the `ansible` namespace do not work. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ansible-galaxy collections ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.9.0.dev0 config file = None configured module search path = ['/home/meyers/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/meyers/ansible/virtualenv/ansible-dev3/local/lib/python3.7/site-packages/ansible executable location = /home/meyers/ansible/virtualenv/ansible-dev3/bin/ansible python version = 3.7.3 (default, Apr 3 2019, 05:39:12) [GCC 8.3.0] ``` ##### CONFIGURATION blank ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> * Create two adjacent collections `a.foo` and `ansible.foo` w/ a single module, `my_test.py`. 
* Call the module via `ansible.foo.my_test` and `a.foo.my_test` <!--- Paste example playbooks or commands between quotes below --> `collections/ansible_collections/ansible/foo/plugins/modules/my_test.py` ```yaml # my_test.py #!/usr/bin/python # Copyright: (c) 2018, Terry Jones <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: my_test short_description: This is my test module version_added: "2.4" description: - "This is my longer description explaining my test module" options: name: description: - This is the message to send to the test module required: true new: description: - Control to demo if the result of this module is changed or not required: false extends_documentation_fragment: - azure author: - Your Name (@yourhandle) ''' EXAMPLES = ''' # Pass in a message - name: Test with a message my_test: name: hello world # pass in a message and have changed true - name: Test with a message and changed output my_test: name: hello world new: true # fail the module - name: Test failure of the module my_test: name: fail me ''' RETURN = ''' original_message: description: The original name param that was passed in type: str returned: always message: description: The output message that the test module generates type: str returned: always ''' from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( name=dict(type='str', required=True), new=dict(type='bool', required=False, default=False) ) # seed the result dict in the object # we primarily care about changed and state # change is if this module effectively modified the target # state will include any data that you want your module to pass back # for consumption, for example, in a subsequent task result = dict( changed=False, 
original_message='', message='' ) # the AnsibleModule object will be our abstraction working with Ansible # this includes instantiation, a couple of common attr would be the # args/params passed to the execution, as well as if the module # supports check mode module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) # manipulate or modify the state as needed (this is going to be the # part where your module will do what it needs to do) result['original_message'] = module.params['name'] result['message'] = 'goodbye' # use whatever logic you need to determine whether or not this module # made any modifications to your target if module.params['new']: result['changed'] = True # during the execution of the module, if there is an exception or a # conditional state that effectively causes a failure, run # AnsibleModule.fail_json() to pass in the message and the result if module.params['name'] == 'fail me': module.fail_json(msg='You requested this to fail', **result) # in the event of a successful module execution, you will want to # simple AnsibleModule.exit_json(), passing the key/value results module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main() ``` ```yaml # main.yml - hosts: localhost gather_facts: false tasks: - ansible.foo.my_test: name: "hello world" - a.foo.my_test: name: "hello world" ``` Copy the ansible subdir into a. ``` . 
├── collections │   └── ansible_collections │   ├── a │   │   └── foo │   │   └── plugins │   │   └── modules │   │   └── my_test.py │   └── ansible │   └── foo │   └── plugins │   └── modules │   └── my_test.py └── main.yml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS * For both `ansible.foo.my_test` and `a.foo.my_test` to succeed. ##### ACTUAL RESULTS * `ansible.foo.my_test` fails because the module can not be found (see below) <!--- Paste verbatim command output between quotes --> ``` ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path. The error appears to be in '/home/meyers/ansible/ansible-examples/collection_ansible_namespace/main.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - ansible.foo.my_test: ```
https://github.com/ansible/ansible/issues/59988
https://github.com/ansible/ansible/pull/61908
f58899eef7bce5a682a52d33914d644e494ff898
7f4328ad1261295d43f58f3ebc42444ac03cc5ea
2019-08-02T15:40:51Z
python
2019-09-06T01:50:22Z
test/integration/targets/collections/posix.yml
- hosts: testhost tasks: # basic test of FQ module lookup and that we got the right one (user-dir hosted) - name: exec FQ module in a user-dir testns collection testns.testcoll.testmodule: register: testmodule_out # verifies that distributed collection subpackages are visible under a multi-location namespace (testns exists in user and sys locations) - name: exec FQ module in a sys-dir testns collection testns.coll_in_sys.systestmodule: register: systestmodule_out # verifies that content-adjacent collections were automatically added to the installed content roots - name: exec FQ module from content-adjacent collection testns.content_adj.contentadjmodule: register: contentadjmodule_out # content should only be loaded from the first visible instance of a collection - name: attempt to look up FQ module in a masked collection testns.testcoll.plugin_lookup: type: module name: testns.testcoll.maskedmodule register: maskedmodule_out # action in a collection subdir - name: test subdir action FQ testns.testcoll.action_subdir.subdir_ping_action: register: subdir_ping_action_out # module in a collection subdir - name: test subdir module FQ testns.testcoll.module_subdir.subdir_ping_module: register: subdir_ping_module_out # module with a granular module_utils import (from (this collection).module_utils.leaf import thingtocall) - name: exec module with granular module utils import from this collection testns.testcoll.uses_leaf_mu_granular_import: register: granular_out # module with a granular nested module_utils import (from (this collection).module_utils.base import thingtocall, # where base imports secondary from the same collection's module_utils) - name: exec module with nested module utils from this collection testns.testcoll.uses_base_mu_granular_nested_import: register: granular_nested_out # module with a flat module_utils import (import (this collection).module_utils.leaf) - name: exec module with flat module_utils import from this collection 
testns.testcoll.uses_leaf_mu_flat_import: register: flat_out # module with a full-module module_utils import using 'from' (from (this collection).module_utils import leaf) - name: exec module with full-module module_utils import using 'from' from this collection testns.testcoll.uses_leaf_mu_module_import_from: register: from_out - assert: that: - testmodule_out.source == 'user' - systestmodule_out.source == 'sys' - contentadjmodule_out.source == 'content_adj' - not maskedmodule_out.plugin_path - subdir_ping_action_out is not changed - subdir_ping_module_out is not changed - granular_out.mu_result == 'thingtocall in leaf' - granular_nested_out.mu_result == 'thingtocall in base called thingtocall in secondary' - flat_out.mu_result == 'thingtocall in leaf' - from_out.mu_result == 'thingtocall in leaf' - from_out.mu2_result == 'thingtocall in secondary' - hosts: testhost tasks: - name: exercise filters/tests/lookups assert: that: - "'data' | testns.testcoll.testfilter == 'data_via_testfilter_from_userdir'" - "'data' | testns.testcoll.testfilter2 == 'data_via_testfilter2_from_userdir'" - "'data' | testns.testcoll.filter_subdir.test_subdir_filter == 'data_via_testfilter_from_subdir'" - "'from_user' is testns.testcoll.testtest" - "'from_user2' is testns.testcoll.testtest2" - "'subdir_from_user' is testns.testcoll.test_subdir.subdir_test" - lookup('testns.testcoll.mylookup') == 'mylookup_from_user_dir' - lookup('testns.testcoll.mylookup2') == 'mylookup2_from_user_dir' - lookup('testns.testcoll.lookup_subdir.my_subdir_lookup') == 'subdir_lookup_from_user_dir' # ensure that the synthetic ansible.builtin collection limits to builtin plugins, that ansible.legacy loads overrides # from legacy plugin dirs, and that a same-named plugin loaded from a real collection is not masked by the others - hosts: testhost tasks: - name: test unqualified ping from library dir ping: register: unqualified_ping_out - name: test legacy-qualified ping from library dir ansible.legacy.ping: 
register: legacy_ping_out - name: test builtin ping ansible.builtin.ping: register: builtin_ping_out - name: test collection-based ping testns.testcoll.ping: register: collection_ping_out - assert: that: - unqualified_ping_out.source == 'legacy_library_dir' - legacy_ping_out.source == 'legacy_library_dir' - builtin_ping_out.ping == 'pong' - collection_ping_out.source == 'user' # verify the default value for the collections list is empty - hosts: testhost tasks: - name: sample default collections value testns.testcoll.plugin_lookup: register: coll_default_out - assert: that: # in original release, collections defaults to empty, which is mostly equivalent to ansible.legacy - not coll_default_out.collection_list # ensure that inheritance/masking works as expected, that the proper default values are injected when missing, # and that the order is preserved if one of the magic values is explicitly specified - name: verify collections keyword play/block/task inheritance and magic values hosts: testhost collections: - bogus.fromplay tasks: - name: sample play collections value testns.testcoll.plugin_lookup: register: coll_play_out - name: collections override block-level collections: - bogus.fromblock block: - name: sample block collections value testns.testcoll.plugin_lookup: register: coll_block_out - name: sample task collections value collections: - bogus.fromtask testns.testcoll.plugin_lookup: register: coll_task_out - name: sample task with explicit core collections: - ansible.builtin - bogus.fromtaskexplicitcore testns.testcoll.plugin_lookup: register: coll_task_core - name: sample task with explicit legacy collections: - ansible.legacy - bogus.fromtaskexplicitlegacy testns.testcoll.plugin_lookup: register: coll_task_legacy - assert: that: # ensure that parent value inheritance is masked properly by explicit setting - coll_play_out.collection_list == ['bogus.fromplay', 'ansible.legacy'] - coll_block_out.collection_list == ['bogus.fromblock', 'ansible.legacy'] - 
coll_task_out.collection_list == ['bogus.fromtask', 'ansible.legacy'] - coll_task_core.collection_list == ['ansible.builtin', 'bogus.fromtaskexplicitcore'] - coll_task_legacy.collection_list == ['ansible.legacy', 'bogus.fromtaskexplicitlegacy'] - name: verify unqualified plugin resolution behavior hosts: testhost collections: - testns.testcoll - testns.coll_in_sys - testns.contentadj tasks: # basic test of unqualified module lookup and that we got the right one (user-dir hosted, there's another copy of # this one in the same-named collection in sys dir that should be masked - name: exec unqualified module in a user-dir testns collection testmodule: register: testmodule_out # use another collection to verify that we're looking in all collections listed on the play - name: exec unqualified module in a sys-dir testns collection systestmodule: register: systestmodule_out - assert: that: - testmodule_out.source == 'user' - systestmodule_out.source == 'sys' # test keyword-static execution of a FQ collection-backed role with "tasks/main.yaml" - name: verify collection-backed role execution (keyword static) hosts: testhost collections: # set to ansible.builtin only to ensure that roles function properly without inheriting the play's collections config - ansible.builtin vars: test_role_input: keyword static roles: - role: testns.testcoll.testrole_main_yaml tasks: - name: ensure role executed assert: that: - test_role_output.msg == test_role_input - testrole_source == 'collection' # test dynamic execution of a FQ collection-backed role - name: verify collection-backed role execution (dynamic) hosts: testhost collections: # set to ansible.builtin only to ensure that roles function properly without inheriting the play's collections config - ansible.builtin vars: test_role_input: dynamic tasks: - include_role: name: testns.testcoll.testrole - name: ensure role executed assert: that: - test_role_output.msg == test_role_input - testrole_source == 'collection' # test task-static 
execution of a FQ collection-backed role - name: verify collection-backed role execution (task static) hosts: testhost collections: - ansible.builtin vars: test_role_input: task static tasks: - import_role: name: testns.testcoll.testrole - name: ensure role executed assert: that: - test_role_output.msg == test_role_input - testrole_source == 'collection' # test a legacy playbook-adjacent role, ensure that play collections config is not inherited - name: verify legacy playbook-adjacent role behavior hosts: testhost collections: - bogus.bogus vars: test_role_input: legacy playbook-adjacent roles: - testrole # FIXME: this should technically work to look up a playbook-adjacent role # - ansible.legacy.testrole tasks: - name: ensure role executed assert: that: - test_role_output.msg == test_role_input - testrole_source == 'legacy roles dir' # test dynamic execution of a FQ collection-backed role - name: verify collection-backed role execution in subdir (include) hosts: testhost vars: test_role_input: dynamic (subdir) tasks: - include_role: name: testns.testcoll.role_subdir.subdir_testrole - name: ensure role executed assert: that: - test_role_output.msg == test_role_input - testrole_source == 'collection' # test collection-relative role deps (keyword static) - name: verify collection-relative role deps hosts: testhost vars: outer_role_input: keyword static outer test_role_input: keyword static inner roles: - testns.testcoll.calls_intra_collection_dep_role_unqualified tasks: - assert: that: - outer_role_output.msg == outer_role_input - test_role_output.msg == test_role_input - testrole_source == 'collection' # test collection-relative role deps (task static) - name: verify collection-relative role deps hosts: testhost vars: outer_role_input: task static outer test_role_input: task static inner tasks: - import_role: name: testns.testcoll.calls_intra_collection_dep_role_unqualified - assert: that: - outer_role_output.msg == outer_role_input - test_role_output.msg == 
test_role_input - testrole_source == 'collection' # test collection-relative role deps (task dynamic) - name: verify collection-relative role deps hosts: testhost vars: outer_role_input: task dynamic outer test_role_input: task dynamic inner tasks: - include_role: name: testns.testcoll.calls_intra_collection_dep_role_unqualified - assert: that: - outer_role_output.msg == outer_role_input - test_role_output.msg == test_role_input - testrole_source == 'collection' - name: validate static task include behavior hosts: testhost collections: - bogus.bogus tasks: - import_tasks: includeme.yml - name: validate dynamic task include behavior hosts: testhost collections: - bogus.bogus tasks: - include_tasks: includeme.yml - name: test a collection-hosted connection plugin against a host from a collection-hosted inventory plugin hosts: dynamic_host_a vars: ansible_connection: testns.testcoll.localconn ansible_localconn_connectionvar: from_play tasks: - raw: echo 'hello world' register: connection_out - assert: that: - connection_out.stdout == "localconn ran echo 'hello world'" # ensure that the connection var we overrode above made it into the running config - connection_out.stderr == "connectionvar is from_play" - hosts: testhost tasks: - assert: that: - hostvars['dynamic_host_a'] is defined - hostvars['dynamic_host_a'].connection_out.stdout == "localconn ran echo 'hello world'"
closed
ansible/ansible
https://github.com/ansible/ansible
61,195
Setting ec2_asg `metrics_collection: yes` results in module always reporting modifications
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> Similar to #61085, there is a comparison bug which is encountered when the module attempts to compare the value of "EnabledMetrics". The ordering of the input list might not match the API response's ordering for the existing ASG, which will cause the module to believe the ASG has been modified, even if it hasn't. This bug is only encountered when `metrics_collection: yes`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION N/A ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Arch, Red Hat 7, CentOS 7 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Set `metrics_collection: yes`, run task multiple times without any modification. Since the API response ordering does not match the default value, the resource will always appear as "modified". <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> If no changes are introduced, the task should not report any modifications. ##### ACTUAL RESULTS Ansible reports modifications to the ASGs.
https://github.com/ansible/ansible/issues/61195
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T19:34:22Z
python
2019-09-06T19:48:40Z
changelogs/fragments/61284-ec2_asg-idempotency.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,195
Setting ec2_asg `metrics_collection: yes` results in module always reporting modifications
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> Similar to #61085, there is a comparison bug which is encountered when the module attempts to compare the value of "EnabledMetrics". The ordering of the input list might not match the API response's ordering for the existing ASG, which will cause the module to believe the ASG has been modified, even if it hasn't. This bug is only encountered when `metrics_collection: yes`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION N/A ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Arch, Red Hat 7, CentOS 7 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Set `metrics_collection: yes`, run task multiple times without any modification. Since the API response ordering does not match the default value, the resource will always appear as "modified". <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> If no changes are introduced, the task should not report any modifications. ##### ACTUAL RESULTS Ansible reports modifications to the ASGs.
https://github.com/ansible/ansible/issues/61195
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T19:34:22Z
python
2019-09-06T19:48:40Z
hacking/aws_config/testing_policies/compute-policy.json
{# Not all Autoscaling API Actions allow specified resources #} {# See http://docs.aws.amazon.com/autoscaling/latest/userguide/control-access-using-iam.html#policy-auto-scaling-resources #} { "Version": "2012-10-17", "Statement": [ { "Sid": "DescribeAutoscaling", "Effect": "Allow", "Action": [ "autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeLaunchConfigurations", "autoscaling:DescribePolicies" ], "Resource": "*" }, { "Sid": "AllowAutoscaling", "Effect": "Allow", "Action": [ "autoscaling:*LaunchConfiguration", "autoscaling:*AutoScalingGroup", "autoscaling:*MetricsCollection", "autoscaling:PutScalingPolicy", "autoscaling:DeletePolicy" ], "Resource": [ "arn:aws:autoscaling:{{aws_region}}:{{aws_account}}:*" ] }, {# Note that not all EC2 API Actions allow a specific resource #} {# See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html#ec2-api-unsupported-resource-permissions #} { "Sid": "AllowUnspecifiedEC2Resource", "Effect": "Allow", "Action": [ "ec2:*LaunchTemplate", "ec2:*LaunchTemplateVersion", "ec2:*LaunchTemplateVersions", "ec2:AllocateAddress", "ec2:AssociateAddress", "ec2:AssociateDhcpOptions", "ec2:AssociateRouteTable", "ec2:AssociateVpcCidrBlock", "ec2:AssociateSubnetCidrBlock", "ec2:AttachInternetGateway", "ec2:AttachNetworkInterface", "ec2:AttachVolume", "ec2:AttachVpnGateway", "ec2:CreateCustomerGateway", "ec2:CreateDhcpOptions", "ec2:CreateImage", "ec2:CreateInternetGateway", "ec2:CreateKeyPair", "ec2:CreateNatGateway", "ec2:CreateNetworkInterface", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", "ec2:CreateSnapshot", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVpc", "ec2:CreateVpnConnection", "ec2:CreateVpnGateway", "ec2:DeleteCustomerGateway", "ec2:DeleteDhcpOptions", "ec2:DeleteInternetGateway", "ec2:DeleteKeyPair", "ec2:DeleteNatGateway", "ec2:DeleteNetworkInterface", "ec2:DeleteRoute", "ec2:DeleteRouteTable", "ec2:DeleteSnapshot", "ec2:DeleteSubnet", "ec2:DeleteTags", 
"ec2:DeleteVpc", "ec2:DeleteVpnConnection", "ec2:DeleteVpnGateway", "ec2:DeregisterImage", "ec2:DetachInternetGateway", "ec2:DetachVpnGateway", "ec2:Describe*", "ec2:DisassociateAddress", "ec2:DisassociateRouteTable", "ec2:DisassociateSubnetCidrBlock", "ec2:ImportKeyPair", "ec2:ModifyImageAttribute", "ec2:ModifyInstanceAttribute", "ec2:ModifySubnetAttribute", "ec2:ModifyVpcAttribute", "ec2:RegisterImage", "ec2:ReleaseAddress", "ec2:ReplaceRouteTableAssociation", "ec2:ReplaceIamInstanceProfileAssociation", "ec2:ReportInstanceStatus" ], "Resource": "*" }, { "Sid": "AllowSpecifiedEC2Resource", "Effect": "Allow", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:AuthorizeSecurityGroupEgress", "ec2:CreateTags", "ec2:CreateVolume", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", "ec2:DeleteVolume", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "ec2:RunInstances", "ec2:StartInstances", "ec2:StopInstances", "ec2:TerminateInstances", "ec2:UpdateSecurityGroupRuleDescriptionsIngress", "ec2:UpdateSecurityGroupRuleDescriptionsEgress" ], "Resource": [ "arn:aws:ec2:{{aws_region}}::image/*", "arn:aws:ec2:{{aws_region}}:{{aws_account}}:*" ] }, {# According to http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/load-balancer-authentication-access-control.html #} {# Resource level access control is not possible for the new ELB API (providing Application Load Balancer functionality #} {# While it remains possible for the old API, there is no distinction of the Actions between old API and new API #} { "Sid": "AllowLoadBalancerOperations", "Effect": "Allow", "Action": [ "elasticloadbalancing:AddTags", "elasticloadbalancing:ConfigureHealthCheck", "elasticloadbalancing:CreateListener", "elasticloadbalancing:CreateLoadBalancer", "elasticloadbalancing:CreateLoadBalancerListeners", "elasticloadbalancing:CreateRule", "elasticloadbalancing:CreateTargetGroup", "elasticloadbalancing:DeleteListener", "elasticloadbalancing:DeleteLoadBalancer", 
"elasticloadbalancing:DeleteLoadBalancerListeners", "elasticloadbalancing:DeleteRule", "elasticloadbalancing:DeleteTargetGroup", "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancer*", "elasticloadbalancing:DescribeTags", "elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:ModifyListener", "elasticloadbalancing:ModifyLoadBalancerAttributes", "elasticloadbalancing:ModifyRule", "elasticloadbalancing:RegisterInstancesWithLoadBalancer", "elasticloadbalancing:RemoveTags" ], "Resource": "*" }, {# Only certain lambda actions can be restricted to a specific resource #} {# http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html #} { "Sid": "AllowApiGateway", "Effect": "Allow", "Action": [ "apigateway:*" ], "Resource": [ "arn:aws:apigateway:{{aws_region}}::/*" ] }, { "Sid": "AllowGetUserForLambdaCreation", "Effect": "Allow", "Action": [ "iam:GetUser" ], "Resource": [ "arn:aws:iam::{{aws_account}}:user/ansible_integration_tests" ] }, { "Sid": "AllowLambdaManagementWithoutResource", "Effect": "Allow", "Action": [ "lambda:CreateEventSourceMapping", "lambda:GetAccountSettings", "lambda:GetEventSourceMapping", "lambda:List*", "lambda:TagResource", "lambda:UntagResource" ], "Resource": "*" }, { "Sid": "AllowLambdaManagementWithResource", "Effect": "Allow", "Action": [ "lambda:AddPermission", "lambda:CreateAlias", "lambda:CreateFunction", "lambda:DeleteAlias", "lambda:DeleteFunction", "lambda:GetAlias", "lambda:GetFunction", "lambda:GetFunctionConfiguration", "lambda:GetPolicy", "lambda:InvokeFunction", "lambda:PublishVersion", "lambda:RemovePermission", "lambda:UpdateAlias", "lambda:UpdateEventSourceMapping", "lambda:UpdateFunctionCode", "lambda:UpdateFunctionConfiguration" ], "Resource": "arn:aws:lambda:{{aws_region}}:{{aws_account}}:function:*" }, { "Sid": 
"AllowRoleManagement", "Effect": "Allow", "Action": [ "iam:PassRole" ], "Resource": [ "arn:aws:iam::{{aws_account}}:role/ansible_lambda_role", "arn:aws:iam::{{aws_account}}:role/ecsInstanceRole", "arn:aws:iam::{{aws_account}}:role/ec2InstanceRole", "arn:aws:iam::{{aws_account}}:role/ecsServiceRole", "arn:aws:iam::{{aws_account}}:role/aws_eks_cluster_role", "arn:aws:iam::{{aws_account}}:role/ecsTaskExecutionRole" ] }, { "Sid": "AllowSESManagement", "Effect": "Allow", "Action": [ "ses:VerifyEmailIdentity", "ses:DeleteIdentity", "ses:GetIdentityVerificationAttributes", "ses:GetIdentityNotificationAttributes", "ses:VerifyDomainIdentity", "ses:SetIdentityNotificationTopic", "ses:SetIdentityHeadersInNotificationsEnabled", "ses:SetIdentityFeedbackForwardingEnabled", "ses:GetIdentityPolicies", "ses:PutIdentityPolicy", "ses:DeleteIdentityPolicy", "ses:ListIdentityPolicies", "ses:SetIdentityFeedbackForwardingEnabled", "ses:ListReceiptRuleSets", "ses:DescribeReceiptRuleSet", "ses:DescribeActiveReceiptRuleSet", "ses:SetActiveReceiptRuleSet", "ses:CreateReceiptRuleSet", "ses:DeleteReceiptRuleSet" ], "Resource": [ "*" ] }, { "Sid": "AllowSNSManagement", "Effect": "Allow", "Action": [ "SNS:CreateTopic", "SNS:DeleteTopic", "SNS:GetTopicAttributes", "SNS:ListSubscriptions", "SNS:ListSubscriptionsByTopic", "SNS:ListTopics", "SNS:SetTopicAttributes", "SNS:Subscribe", "SNS:Unsubscribe" ], "Resource": [ "*" ] } ] }
closed
ansible/ansible
https://github.com/ansible/ansible
61,195
Setting ec2_asg `metrics_collection: yes` results in module always reporting modifications
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> Similar to #61085, there is a comparison bug which is encountered when the module attempts to compare the value of "EnabledMetrics". The ordering of the input list might not match the API response's ordering for the existing ASG, which will cause the module to believe the ASG has been modified, even if it hasn't. This bug is only encountered when `metrics_collection: yes`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION N/A ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Arch, Red Hat 7, CentOS 7 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Set `metrics_collection: yes`, run task multiple times without any modification. Since the API response ordering does not match the default value, the resource will always appear as "modified". <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> If no changes are introduced, the task should not report any modifications. ##### ACTUAL RESULTS Ansible reports modifications to the ASGs.
https://github.com/ansible/ansible/issues/61195
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T19:34:22Z
python
2019-09-06T19:48:40Z
lib/ansible/modules/cloud/amazon/ec2_asg.py
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: ec2_asg short_description: Create or delete AWS Autoscaling Groups description: - Can create or delete AWS Autoscaling Groups - Can be used with the ec2_lc module to manage Launch Configurations version_added: "1.6" author: "Gareth Rushgrove (@garethr)" requirements: [ "boto3", "botocore" ] options: state: description: - register or deregister the instance choices: ['present', 'absent'] default: present name: description: - Unique name for group to be created or deleted required: true load_balancers: description: - List of ELB names to use for the group. Use for classic load balancers. target_group_arns: description: - List of target group ARNs to use for the group. Use for application load balancers. version_added: "2.4" availability_zones: description: - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set. launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. If unspecified then the current group value will be used. One of launch_config_name or launch_template must be provided. 
launch_template: description: - Dictionary describing the Launch Template to use suboptions: version: description: - The version number of the launch template to use. Defaults to latest version if not provided. default: "latest" launch_template_name: description: - The name of the launch template. Only one of launch_template_name or launch_template_id is required. launch_template_id: description: - The id of the launch template. Only one of launch_template_name or launch_template_id is required. version_added: "2.8" min_size: description: - Minimum number of instances in group, if unspecified then the current group value will be used. max_size: description: - Maximum number of instances in group, if unspecified then the current group value will be used. placement_group: description: - Physical location of your cluster placement group created in Amazon EC2. version_added: "2.3" desired_capacity: description: - Desired number of instances in group, if unspecified then the current group value will be used. replace_all_instances: description: - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration. It increases the ASG size by C(replace_batch_size), waits for the new instances to be up and running. After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced. Once that's done the ASG size is reduced back to the expected size. version_added: "1.8" default: 'no' type: bool replace_batch_size: description: - Number of instances you'd like to replace at a time. Used with replace_all_instances. required: false version_added: "1.8" default: 1 replace_instances: description: - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. 
version_added: "1.8" lc_check: description: - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config. version_added: "1.8" default: 'yes' type: bool lt_check: description: - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_template or launch_template version. version_added: "2.8" default: 'yes' type: bool vpc_zone_identifier: description: - List of VPC subnets to use tags: description: - A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true. version_added: "1.7" health_check_period: description: - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. required: false default: 300 seconds version_added: "1.7" health_check_type: description: - The service you want the health status from, Amazon EC2 or Elastic Load Balancer. required: false default: EC2 version_added: "1.7" choices: ['EC2', 'ELB'] default_cooldown: description: - The number of seconds after a scaling activity completes before another can begin. default: 300 seconds version_added: "2.0" wait_timeout: description: - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy", try increasing this value. default: 300 version_added: "1.8" wait_for_instances: description: - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy". version_added: "1.9" default: 'yes' type: bool termination_policies: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. 
- For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained. default: Default choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" notification_topic: description: - A SNS topic ARN to send auto scaling notifications to. version_added: "2.2" notification_types: description: - A list of auto scaling events to trigger notifications on. default: - 'autoscaling:EC2_INSTANCE_LAUNCH' - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR' - 'autoscaling:EC2_INSTANCE_TERMINATE' - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' required: false version_added: "2.2" suspend_processes: description: - A list of scaling processes to suspend. default: [] choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer'] version_added: "2.3" metrics_collection: description: - Enable ASG metrics collection type: bool default: 'no' version_added: "2.6" metrics_granularity: description: - When metrics_collection is enabled this will determine granularity of metrics collected by CloudWatch default: "1minute" version_added: "2.6" metrics_list: description: - List of autoscaling metrics to collect when enabling metrics_collection default: - 'GroupMinSize' - 'GroupMaxSize' - 'GroupDesiredCapacity' - 'GroupInServiceInstances' - 'GroupPendingInstances' - 'GroupStandbyInstances' - 'GroupTerminatingInstances' - 'GroupTotalInstances' version_added: "2.6" extends_documentation_fragment: - aws - ec2 """ EXAMPLES = ''' # Basic configuration with Launch Configuration - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment:
production propagate_at_launch: no # Rolling ASG Updates # Below is an example of how to assign a new launch config to an ASG and terminate old instances. # # All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in # a rolling fashion with instances using the current launch configuration, "my_new_lc". # # This could also be considered a rolling deploy of a pre-baked AMI. # # If this is a newly created group, the instances will not be replaced since all instances # will have the current launch configuration. - name: create launch config ec2_lc: name: my_new_lc image_id: ami-lkajsf key_name: mykey region: us-east-1 security_groups: sg-23423 instance_type: m1.small assign_public_ip: yes - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_all_instances: yes min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 # To only replace a couple of instances instead of all of them, supply a list # to "replace_instances": - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_instances: - i-b345231 - i-24c2931 min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 # Basic Configuration with Launch Template - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_template: version: '1' launch_template_name: 'lt-example' launch_template_id: 'lt-123456' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production propagate_at_launch: no ''' RETURN = ''' --- auto_scaling_group_name: description: The unique name of the auto scaling group returned: success type: str sample: "myasg" auto_scaling_group_arn: description: The unique ARN of the autoscaling group returned: success type: str sample: 
"arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg" availability_zones: description: The availability zones for the auto scaling group returned: success type: list sample: [ "us-east-1d" ] created_time: description: Timestamp of create time of the auto scaling group returned: success type: str sample: "2017-11-08T14:41:48.272000+00:00" default_cooldown: description: The default cooldown time in seconds. returned: success type: int sample: 300 desired_capacity: description: The number of EC2 instances that should be running in this group. returned: success type: int sample: 3 healthcheck_period: description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. returned: success type: int sample: 30 healthcheck_type: description: The service you want the health status from, one of "EC2" or "ELB". returned: success type: str sample: "ELB" healthy_instances: description: Number of instances in a healthy state returned: success type: int sample: 5 in_service_instances: description: Number of instances in service returned: success type: int sample: 3 instance_facts: description: Dictionary of EC2 instances and their status as it relates to the ASG. returned: success type: dict sample: { "i-0123456789012": { "health_status": "Healthy", "launch_config_name": "public-webapp-production-1", "lifecycle_state": "InService" } } instances: description: list of instance IDs in the ASG returned: success type: list sample: [ "i-0123456789012" ] launch_config_name: description: > Name of launch configuration associated with the ASG. Same as launch_configuration_name, provided for compatibility with ec2_asg module. returned: success type: str sample: "public-webapp-production-1" load_balancers: description: List of load balancers names attached to the ASG. 
returned: success type: list sample: ["elb-webapp-prod"] max_size: description: Maximum size of group returned: success type: int sample: 3 min_size: description: Minimum size of group returned: success type: int sample: 1 pending_instances: description: Number of instances in pending state returned: success type: int sample: 1 tags: description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. returned: success type: list sample: [ { "key": "Name", "value": "public-webapp-production-1", "resource_id": "public-webapp-production-1", "resource_type": "auto-scaling-group", "propagate_at_launch": "true" }, { "key": "env", "value": "production", "resource_id": "public-webapp-production-1", "resource_type": "auto-scaling-group", "propagate_at_launch": "true" } ] target_group_arns: description: List of ARNs of the target groups that the ASG populates returned: success type: list sample: [ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b", "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234" ] target_group_names: description: List of names of the target groups that the ASG populates returned: success type: list sample: [ "target-group-host-hello", "target-group-path-world" ] termination_policies: description: A list of termination policies for the group. 
returned: success type: str sample: ["Default"] unhealthy_instances: description: Number of instances in an unhealthy state returned: success type: int sample: 0 viable_instances: description: Number of instances in a viable state returned: success type: int sample: 1 vpc_zone_identifier: description: VPC zone ID / subnet id for the auto scaling group returned: success type: str sample: "subnet-a31ef45f" metrics_collection: description: List of enabled AutoScalingGroup metrics returned: success type: list sample: [ { "Granularity": "1Minute", "Metric": "GroupInServiceInstances" } ] ''' import time import traceback from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry try: import botocore except ImportError: pass # will be detected by imported HAS_BOTO3 ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', 'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies', 'VPCZoneIdentifier') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') backoff_params = dict(tries=10, delay=3, backoff=1.5) @AWSRetry.backoff(**backoff_params) def describe_autoscaling_groups(connection, group_name): pg = connection.get_paginator('describe_auto_scaling_groups') return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', []) @AWSRetry.backoff(**backoff_params) def deregister_lb_instances(connection, lb_name, instance_id): connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)]) @AWSRetry.backoff(**backoff_params) def describe_instance_health(connection, lb_name, instances): params = dict(LoadBalancerName=lb_name) if
instances: params.update(Instances=instances) return connection.describe_instance_health(**params) @AWSRetry.backoff(**backoff_params) def describe_target_health(connection, target_group_arn, instances): return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances) @AWSRetry.backoff(**backoff_params) def suspend_asg_processes(connection, asg_name, processes): connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) @AWSRetry.backoff(**backoff_params) def resume_asg_processes(connection, asg_name, processes): connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) @AWSRetry.backoff(**backoff_params) def describe_launch_configurations(connection, launch_config_name): pg = connection.get_paginator('describe_launch_configurations') return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() @AWSRetry.backoff(**backoff_params) def describe_launch_templates(connection, launch_template): if launch_template['launch_template_id'] is not None: try: lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) return lt except (botocore.exceptions.ClientError) as e: module.fail_json(msg="No launch template found matching: %s" % launch_template) else: try: lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) return lt except (botocore.exceptions.ClientError) as e: module.fail_json(msg="No launch template found matching: %s" % launch_template) @AWSRetry.backoff(**backoff_params) def create_asg(connection, **params): connection.create_auto_scaling_group(**params) @AWSRetry.backoff(**backoff_params) def put_notification_config(connection, asg_name, topic_arn, notification_types): connection.put_notification_configuration( AutoScalingGroupName=asg_name, TopicARN=topic_arn, NotificationTypes=notification_types ) @AWSRetry.backoff(**backoff_params) def 
del_notification_config(connection, asg_name, topic_arn): connection.delete_notification_configuration( AutoScalingGroupName=asg_name, TopicARN=topic_arn ) @AWSRetry.backoff(**backoff_params) def attach_load_balancers(connection, asg_name, load_balancers): connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) @AWSRetry.backoff(**backoff_params) def detach_load_balancers(connection, asg_name, load_balancers): connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) @AWSRetry.backoff(**backoff_params) def attach_lb_target_groups(connection, asg_name, target_group_arns): connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) @AWSRetry.backoff(**backoff_params) def detach_lb_target_groups(connection, asg_name, target_group_arns): connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) @AWSRetry.backoff(**backoff_params) def update_asg(connection, **params): connection.update_auto_scaling_group(**params) @AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) def delete_asg(connection, asg_name, force_delete): connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) @AWSRetry.backoff(**backoff_params) def terminate_asg_instance(connection, instance_id, decrement_capacity): connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, ShouldDecrementDesiredCapacity=decrement_capacity) def enforce_required_arguments_for_create(): ''' As many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module, so we enforce them here ''' missing_args = [] if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group 
create") for arg in ('min_size', 'max_size'): if module.params[arg] is None: missing_args.append(arg) if missing_args: module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) def get_properties(autoscaling_group): properties = dict() properties['healthy_instances'] = 0 properties['in_service_instances'] = 0 properties['unhealthy_instances'] = 0 properties['pending_instances'] = 0 properties['viable_instances'] = 0 properties['terminating_instances'] = 0 instance_facts = dict() autoscaling_group_instances = autoscaling_group.get('Instances') if autoscaling_group_instances: properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] for i in autoscaling_group_instances: if i.get('LaunchConfigurationName'): instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'], 'lifecycle_state': i['LifecycleState'], 'launch_config_name': i['LaunchConfigurationName']} elif i.get('LaunchTemplate'): instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'], 'lifecycle_state': i['LifecycleState'], 'launch_template': i['LaunchTemplate']} else: instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'], 'lifecycle_state': i['LifecycleState']} if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService': properties['viable_instances'] += 1 if i['HealthStatus'] == 'Healthy': properties['healthy_instances'] += 1 else: properties['unhealthy_instances'] += 1 if i['LifecycleState'] == 'InService': properties['in_service_instances'] += 1 if i['LifecycleState'] == 'Terminating': properties['terminating_instances'] += 1 if i['LifecycleState'] == 'Pending': properties['pending_instances'] += 1 else: properties['instances'] = [] properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName') properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN') properties['availability_zones'] = autoscaling_group.get('AvailabilityZones') 
properties['created_time'] = autoscaling_group.get('CreatedTime') properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames') if autoscaling_group.get('LaunchConfigurationName'): properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName') else: properties['launch_template'] = autoscaling_group.get('LaunchTemplate') properties['tags'] = autoscaling_group.get('Tags') properties['min_size'] = autoscaling_group.get('MinSize') properties['max_size'] = autoscaling_group.get('MaxSize') properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity') properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod') properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType') properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') properties['termination_policies'] = autoscaling_group.get('TerminationPolicies') properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs') properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier') properties['metrics_collection'] = autoscaling_group.get('EnabledMetrics') if properties['target_group_arns']: region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) elbv2_connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) tg_paginator = elbv2_connection.get_paginator('describe_target_groups') tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result() target_groups = tg_result['TargetGroups'] else: target_groups = [] properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups] return properties def get_launch_object(connection, ec2_connection): launch_object = dict() launch_config_name = module.params.get('launch_config_name') 
launch_template = module.params.get('launch_template') if launch_config_name is None and launch_template is None: return launch_object elif launch_config_name: try: launch_configs = describe_launch_configurations(connection, launch_config_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to describe launch configurations", exception=traceback.format_exc()) if len(launch_configs['LaunchConfigurations']) == 0: module.fail_json(msg="No launch config found with name %s" % launch_config_name) launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} return launch_object elif launch_template: lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0] if launch_template['version'] is not None: launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}} else: launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}} return launch_object def elb_dreg(asg_connection, group_name, instance_id): region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) as_group = describe_autoscaling_groups(asg_connection, group_name)[0] wait_timeout = module.params.get('wait_timeout') count = 1 if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB': elb_connection = boto3_conn(module, conn_type='client', resource='elb', region=region, endpoint=ec2_url, **aws_connect_params) else: return for lb in as_group['LoadBalancerNames']: deregister_lb_instances(elb_connection, lb, instance_id) module.debug("De-registering %s from ELB %s" % (instance_id, lb)) wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: count = 0 for lb in as_group['LoadBalancerNames']: lb_instances = describe_instance_health(elb_connection, lb, []) for i in 
# NOTE(review): the preceding chunk ends mid-way through elb_dreg(); the
# orphaned tail of that function (its "for i in lb_instances['InstanceStates']"
# wait loop, sleep, and timeout fail_json) originally occupied the start of
# this span and must be restored together with elb_dreg's header, which is
# outside this view.


def elb_healthy(asg_connection, elb_connection, group_name):
    """Return the number of ASG instances the attached ELBs report InService.

    Returns None if an instance known to the ASG has not yet shown up in the
    ELB (InvalidInstance race), so callers can retry.
    """
    healthy_instances = set()
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    props = get_properties(as_group)
    # get healthy, inservice instances from ASG
    instances = []
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(dict(InstanceId=instance))
    module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
    module.debug("ELB instance status:")
    lb_instances = list()
    for lb in as_group.get('LoadBalancerNames'):
        # we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
        try:
            lb_instances = describe_instance_health(elb_connection, lb, instances)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidInstance':
                return None
            module.fail_json(msg="Failed to get load balancer.",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to get load balancer.",
                             exception=traceback.format_exc())

        for i in lb_instances.get('InstanceStates'):
            if i['State'] == "InService":
                healthy_instances.add(i['InstanceId'])
            module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
    return len(healthy_instances)


def tg_healthy(asg_connection, elbv2_connection, group_name):
    """Return the number of ASG instances the attached target groups report healthy.

    Returns None if an instance known to the ASG has not yet shown up in the
    target group (InvalidInstance race), so callers can retry.
    """
    healthy_instances = set()
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
    props = get_properties(as_group)
    # get healthy, inservice instances from ASG
    instances = []
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(dict(Id=instance))
    module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
    module.debug("Target Group instance status:")
    tg_instances = list()
    for tg in as_group.get('TargetGroupARNs'):
        # we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
        try:
            tg_instances = describe_target_health(elbv2_connection, tg, instances)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidInstance':
                return None
            module.fail_json(msg="Failed to get target group.",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to get target group.",
                             exception=traceback.format_exc())

        for i in tg_instances.get('TargetHealthDescriptions'):
            if i['TargetHealth']['State'] == "healthy":
                healthy_instances.add(i['Target']['Id'])
            module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
    return len(healthy_instances)


def wait_for_elb(asg_connection, group_name):
    """Block until the ELBs report at least MinSize healthy instances.

    Only applies when the ASG uses ELB health checks; avoids the ASG
    health_check_grace period by querying the ELBs directly.
    Fails the module if wait_timeout elapses first.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    wait_timeout = module.params.get('wait_timeout')

    # if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status as to avoid health_check_grace period that is awarded to ASG instances
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]

    if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
        module.debug("Waiting for ELB to consider instances healthy.")
        elb_connection = boto3_conn(module,
                                    conn_type='client',
                                    resource='elb',
                                    region=region,
                                    endpoint=ec2_url,
                                    **aws_connect_params)

        wait_timeout = time.time() + wait_timeout
        healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)

        while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
            healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
            module.debug("ELB thinks %s instances are healthy." % healthy_instances)
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
        module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)


def wait_for_target_group(asg_connection, group_name):
    """Block until the target groups report at least MinSize healthy instances.

    Only applies when the ASG uses ELB health checks; fails the module if
    wait_timeout elapses first.
    """
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    wait_timeout = module.params.get('wait_timeout')

    # if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status as to avoid health_check_grace period that is awarded to ASG instances
    as_group = describe_autoscaling_groups(asg_connection, group_name)[0]

    if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
        module.debug("Waiting for Target Group to consider instances healthy.")
        elbv2_connection = boto3_conn(module,
                                      conn_type='client',
                                      resource='elbv2',
                                      region=region,
                                      endpoint=ec2_url,
                                      **aws_connect_params)

        wait_timeout = time.time() + wait_timeout
        healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)

        while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
            healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
            module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
        module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)


def suspend_processes(ec2_connection, as_group):
    """Reconcile the ASG's suspended scaling processes with the module params.

    Resumes processes no longer requested and suspends newly requested ones.
    Returns True when any change was made, False otherwise.
    """
    suspend_processes = set(module.params.get('suspend_processes'))

    try:
        suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']])
    except AttributeError:
        # New ASG being created, no suspended_processes defined yet
        suspended_processes = set()

    if suspend_processes == suspended_processes:
        return False

    resume_processes = list(suspended_processes - suspend_processes)
    if resume_processes:
        resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)

    if suspend_processes:
        suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes))

    return True


def create_autoscaling_group(connection):
    """Create the ASG if absent, otherwise converge the existing ASG to the
    requested state (tags, load balancers, target groups, size, launch
    object, metrics, notifications).

    Returns (changed, asg_properties).
    """
    group_name = module.params.get('name')
    load_balancers = module.params['load_balancers']
    target_group_arns = module.params['target_group_arns']
    availability_zones = module.params['availability_zones']
    launch_config_name = module.params.get('launch_config_name')
    launch_template = module.params.get('launch_template')
    min_size = module.params['min_size']
    max_size = module.params['max_size']
    placement_group = module.params.get('placement_group')
    desired_capacity = module.params.get('desired_capacity')
    vpc_zone_identifier = module.params.get('vpc_zone_identifier')
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    wait_timeout = module.params.get('wait_timeout')
    termination_policies = module.params.get('termination_policies')
    notification_topic = module.params.get('notification_topic')
    notification_types = module.params.get('notification_types')
    metrics_collection = module.params.get('metrics_collection')
    metrics_granularity = module.params.get('metrics_granularity')
    metrics_list = module.params.get('metrics_list')

    try:
        as_groups = describe_autoscaling_groups(connection, group_name)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json(msg="Failed to describe auto scaling groups.",
                         exception=traceback.format_exc())

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    ec2_connection = boto3_conn(module,
                                conn_type='client',
                                resource='ec2',
                                region=region,
                                endpoint=ec2_url,
                                **aws_connect_params)

    if vpc_zone_identifier:
        vpc_zone_identifier = ','.join(vpc_zone_identifier)

    asg_tags = []
    for tag in set_tags:
        for k, v in tag.items():
            if k != 'propagate_at_launch':
                asg_tags.append(dict(Key=k,
                                     Value=to_native(v),
                                     PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
                                     ResourceType='auto-scaling-group',
                                     ResourceId=group_name))
    if not as_groups:
        # --- Create path: no ASG with this name exists yet ---
        if not vpc_zone_identifier and not availability_zones:
            availability_zones = module.params['availability_zones'] = [
                zone['ZoneName'] for zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]

        enforce_required_arguments_for_create()

        if desired_capacity is None:
            desired_capacity = min_size
        ag = dict(
            AutoScalingGroupName=group_name,
            MinSize=min_size,
            MaxSize=max_size,
            DesiredCapacity=desired_capacity,
            Tags=asg_tags,
            HealthCheckGracePeriod=health_check_period,
            HealthCheckType=health_check_type,
            DefaultCooldown=default_cooldown,
            TerminationPolicies=termination_policies)

        if vpc_zone_identifier:
            ag['VPCZoneIdentifier'] = vpc_zone_identifier
        if availability_zones:
            ag['AvailabilityZones'] = availability_zones
        if placement_group:
            ag['PlacementGroup'] = placement_group
        if load_balancers:
            ag['LoadBalancerNames'] = load_balancers
        if target_group_arns:
            ag['TargetGroupARNs'] = target_group_arns

        launch_object = get_launch_object(connection, ec2_connection)
        if 'LaunchConfigurationName' in launch_object:
            ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
        elif 'LaunchTemplate' in launch_object:
            ag['LaunchTemplate'] = launch_object['LaunchTemplate']
        else:
            module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate",
                             exception=traceback.format_exc())

        try:
            create_asg(connection, **ag)
            if metrics_collection:
                connection.enable_metrics_collection(AutoScalingGroupName=group_name,
                                                     Granularity=metrics_granularity,
                                                     Metrics=metrics_list)

            all_ag = describe_autoscaling_groups(connection, group_name)
            if len(all_ag) == 0:
                module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
            as_group = all_ag[0]
            suspend_processes(connection, as_group)
            if wait_for_instances:
                wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
                if load_balancers:
                    wait_for_elb(connection, group_name)
                # Wait for target group health if target group(s) defined
                if target_group_arns:
                    wait_for_target_group(connection, group_name)
            if notification_topic:
                put_notification_config(connection, group_name, notification_topic, notification_types)
            as_group = describe_autoscaling_groups(connection, group_name)[0]
            asg_properties = get_properties(as_group)
            changed = True
            return changed, asg_properties
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Failed to create Autoscaling Group.",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to create Autoscaling Group.",
                             exception=traceback.format_exc())
    else:
        # --- Update path: converge the existing ASG ---
        as_group = as_groups[0]
        initial_asg_properties = get_properties(as_group)
        changed = False

        if suspend_processes(connection, as_group):
            changed = True

        # process tag changes
        if len(set_tags) > 0:
            have_tags = as_group.get('Tags')
            want_tags = asg_tags
            dead_tags = []
            have_tag_keyvals = [x['Key'] for x in have_tags]
            want_tag_keyvals = [x['Key'] for x in want_tags]

            for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
                changed = True
                dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
                                      ResourceType='auto-scaling-group', Key=dead_tag))
                have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
            if dead_tags:
                connection.delete_tags(Tags=dead_tags)

            zipped = zip(have_tags, want_tags)
            if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
                changed = True
                connection.create_or_update_tags(Tags=asg_tags)

        # Handle load balancer attachments/detachments
        # Attach load balancers if they are specified but none currently exist
        if load_balancers and not as_group['LoadBalancerNames']:
            changed = True
            try:
                attach_load_balancers(connection, group_name, load_balancers)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            except botocore.exceptions.BotoCoreError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc())

        # Update load balancers if they are specified and one or more already exists
        elif as_group['LoadBalancerNames']:
            change_load_balancers = load_balancers is not None
            # Get differences
            if not load_balancers:
                load_balancers = list()
            wanted_elbs = set(load_balancers)

            has_elbs = set(as_group['LoadBalancerNames'])
            # check if all requested are already existing
            if has_elbs - wanted_elbs and change_load_balancers:
                # if wanted contains less than existing, then we need to delete some
                elbs_to_detach = has_elbs.difference(wanted_elbs)
                if elbs_to_detach:
                    changed = True
                    try:
                        detach_load_balancers(connection, group_name, list(elbs_to_detach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)),
                                         exception=traceback.format_exc())
            if wanted_elbs - has_elbs:
                # if has contains less than wanted, then we need to add some
                elbs_to_attach = wanted_elbs.difference(has_elbs)
                if elbs_to_attach:
                    changed = True
                    try:
                        attach_load_balancers(connection, group_name, list(elbs_to_attach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)),
                                         exception=traceback.format_exc())

        # Handle target group attachments/detachments
        # Attach target groups if they are specified but none currently exist
        if target_group_arns and not as_group['TargetGroupARNs']:
            changed = True
            try:
                attach_lb_target_groups(connection, group_name, target_group_arns)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            except botocore.exceptions.BotoCoreError as e:
                module.fail_json(msg="Failed to update Autoscaling Group.",
                                 exception=traceback.format_exc())
        # Update target groups if they are specified and one or more already exists
        elif target_group_arns is not None and as_group['TargetGroupARNs']:
            # Get differences
            wanted_tgs = set(target_group_arns)
            has_tgs = set(as_group['TargetGroupARNs'])
            # check if all requested are already existing
            if has_tgs.issuperset(wanted_tgs):
                # if wanted contains less than existing, then we need to delete some
                tgs_to_detach = has_tgs.difference(wanted_tgs)
                if tgs_to_detach:
                    changed = True
                    try:
                        detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)),
                                         exception=traceback.format_exc())
            if wanted_tgs.issuperset(has_tgs):
                # if has contains less than wanted, then we need to add some
                tgs_to_attach = wanted_tgs.difference(has_tgs)
                if tgs_to_attach:
                    changed = True
                    try:
                        attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)),
                                         exception=traceback.format_exc())

        # check for attributes that aren't required for updating an existing ASG
        # check if min_size/max_size/desired capacity have been specified and if not use ASG values
        if min_size is None:
            min_size = as_group['MinSize']
        if max_size is None:
            max_size = as_group['MaxSize']
        if desired_capacity is None:
            desired_capacity = as_group['DesiredCapacity']
        ag = dict(
            AutoScalingGroupName=group_name,
            MinSize=min_size,
            MaxSize=max_size,
            DesiredCapacity=desired_capacity,
            HealthCheckGracePeriod=health_check_period,
            HealthCheckType=health_check_type,
            DefaultCooldown=default_cooldown,
            TerminationPolicies=termination_policies)

        # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
        launch_object = get_launch_object(connection, ec2_connection)
        if 'LaunchConfigurationName' in launch_object:
            ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
        elif 'LaunchTemplate' in launch_object:
            ag['LaunchTemplate'] = launch_object['LaunchTemplate']
        else:
            try:
                ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
            except Exception:
                launch_template = as_group['LaunchTemplate']
                # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
                ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'],
                                        "Version": launch_template['Version']}

        if availability_zones:
            ag['AvailabilityZones'] = availability_zones
        if vpc_zone_identifier:
            ag['VPCZoneIdentifier'] = vpc_zone_identifier

        try:
            update_asg(connection, **ag)

            if metrics_collection:
                connection.enable_metrics_collection(AutoScalingGroupName=group_name,
                                                     Granularity=metrics_granularity,
                                                     Metrics=metrics_list)
            else:
                connection.disable_metrics_collection(AutoScalingGroupName=group_name,
                                                      Metrics=metrics_list)

        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e),
                             exception=traceback.format_exc())
        if notification_topic:
            try:
                put_notification_config(connection, group_name, notification_topic, notification_types)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Failed to update Autoscaling Group notifications.",
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            except botocore.exceptions.BotoCoreError as e:
                module.fail_json(msg="Failed to update Autoscaling Group notifications.",
                                 exception=traceback.format_exc())
        if wait_for_instances:
            wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
            # Wait for ELB health if ELB(s) defined
            if load_balancers:
                module.debug('\tWAITING FOR ELB HEALTH')
                wait_for_elb(connection, group_name)
            # Wait for target group health if target group(s) defined
            if target_group_arns:
                module.debug('\tWAITING FOR TG HEALTH')
                wait_for_target_group(connection, group_name)
        try:
            as_group = describe_autoscaling_groups(connection, group_name)[0]
            asg_properties = get_properties(as_group)
            # NOTE(review): when metrics_collection is enabled, the API may
            # return EnabledMetrics in a different order than requested, which
            # can make this comparison report a spurious change — TODO confirm
            # and normalize ordering in get_properties() (see issue #61195).
            if asg_properties != initial_asg_properties:
                changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Failed to read existing Autoscaling Groups.",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json(msg="Failed to read existing Autoscaling Groups.",
                             exception=traceback.format_exc())
        return changed, asg_properties


def delete_autoscaling_group(connection):
    """Delete the ASG, optionally draining instances to zero first.

    Returns True if an ASG was deleted, False if none existed.
    """
    group_name = module.params.get('name')
    notification_topic = module.params.get('notification_topic')
    wait_for_instances = module.params.get('wait_for_instances')
    wait_timeout = module.params.get('wait_timeout')

    if notification_topic:
        del_notification_config(connection, group_name, notification_topic)
    groups = describe_autoscaling_groups(connection, group_name)
    if groups:
        wait_timeout = time.time() + wait_timeout
        if not wait_for_instances:
            delete_asg(connection, group_name, force_delete=True)
        else:
            # Scale the group to zero first so instances terminate cleanly.
            updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
            update_asg(connection, **updated_params)
            instances = True
            while instances and wait_for_instances and wait_timeout >= time.time():
                tmp_groups = describe_autoscaling_groups(connection, group_name)
                if tmp_groups:
                    tmp_group = tmp_groups[0]
                    if not tmp_group.get('Instances'):
                        instances = False
                time.sleep(10)

            if wait_timeout <= time.time():
                # waiting took too long
                module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())

            delete_asg(connection, group_name, force_delete=False)
        while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time():
            time.sleep(5)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime())
        return True

    return False


def get_chunks(l, n):
    """Yield successive n-sized chunks from list l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]


def update_size(connection, group, max_size, min_size, dc):
    """Set the ASG's min/max/desired sizes in one update call."""
    module.debug("setting ASG sizes")
    module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
    updated_group = dict()
    updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
    updated_group['MinSize'] = min_size
    updated_group['MaxSize'] = max_size
    updated_group['DesiredCapacity'] = dc
    update_asg(connection, **updated_group)


def replace(connection):
    """Perform a rolling replacement of instances in the ASG.

    Temporarily grows the group by replace_batch_size, terminates old
    instances batch by batch while waiting for replacements to become
    healthy, then restores the original sizes.

    Returns (changed, asg_properties).
    """
    batch_size = module.params.get('replace_batch_size')
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    max_size = module.params.get('max_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    launch_config_name = module.params.get('launch_config_name')

    # Required to maintain the default value being set to 'true'
    if launch_config_name:
        lc_check = module.params.get('lc_check')
    else:
        lc_check = False
    # Mirror above behaviour for Launch Templates
    launch_template = module.params.get('launch_template')
    if launch_template:
        lt_check = module.params.get('lt_check')
    else:
        lt_check = False
    replace_instances = module.params.get('replace_instances')
    replace_all_instances = module.params.get('replace_all_instances')

    as_group = describe_autoscaling_groups(connection, group_name)[0]
    if desired_capacity is None:
        desired_capacity = as_group['DesiredCapacity']

    wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
    props = get_properties(as_group)
    instances = props['instances']
    if replace_all_instances:
        # If replacing all instances, then set replace_instances to current set
        # This allows replace_instances and replace_all_instances to behave same
        replace_instances = instances
    if replace_instances:
        instances = replace_instances

    # check to see if instances are replaceable if checking launch configs
    if launch_config_name:
        new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances)
    elif launch_template:
        new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances)

    num_new_inst_needed = desired_capacity - len(new_instances)

    if lc_check or lt_check:
        if num_new_inst_needed == 0 and old_instances:
            module.debug("No new instances needed, but old instances are present. Removing old instances")
            terminate_batch(connection, old_instances, instances, True)
            as_group = describe_autoscaling_groups(connection, group_name)[0]
            props = get_properties(as_group)
            changed = True
            return(changed, props)

        # we don't want to spin up extra instances if not necessary
        if num_new_inst_needed < batch_size:
            module.debug("Overriding batch size to %s" % num_new_inst_needed)
            batch_size = num_new_inst_needed

    if not old_instances:
        changed = False
        return(changed, props)

    # check if min_size/max_size/desired capacity have been specified and if not use ASG values
    if min_size is None:
        min_size = as_group['MinSize']
    if max_size is None:
        max_size = as_group['MaxSize']

    # set temporary settings and wait for them to be reached
    # This should get overwritten if the number of instances left is less than the batch size.
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)

    wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
    wait_for_elb(connection, group_name)
    wait_for_target_group(connection, group_name)
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    props = get_properties(as_group)
    instances = props['instances']
    if replace_instances:
        instances = replace_instances

    module.debug("beginning main loop")
    for i in get_chunks(instances, batch_size):
        # break out of this loop if we have enough new instances
        break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)

        wait_for_term_inst(connection, term_instances)
        wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
        wait_for_elb(connection, group_name)
        wait_for_target_group(connection, group_name)

        as_group = describe_autoscaling_groups(connection, group_name)[0]
        if break_early:
            module.debug("breaking loop")
            break

    update_size(connection, as_group, max_size, min_size, desired_capacity)
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    asg_properties = get_properties(as_group)
    module.debug("Rolling update complete.")
    changed = True
    return(changed, asg_properties)


def get_instances_by_launch_config(props, lc_check, initial_instances):
    """Partition the ASG's instances into (new, old) relative to the current
    launch configuration (when lc_check) or relative to initial_instances."""
    new_instances = []
    old_instances = []
    # old instances are those that have the old launch config
    if lc_check:
        for i in props['instances']:
            # Check if migrating from launch_template to launch_config first
            if 'launch_template' in props['instance_facts'][i]:
                old_instances.append(i)
            elif props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
                new_instances.append(i)
            else:
                old_instances.append(i)
    else:
        module.debug("Comparing initial instances with current: %s" % initial_instances)
        for i in props['instances']:
            if i not in initial_instances:
                new_instances.append(i)
            else:
                old_instances.append(i)

    module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
    module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))

    return new_instances, old_instances


def get_instances_by_launch_template(props, lt_check, initial_instances):
    """Partition the ASG's instances into (new, old) relative to the current
    launch template (when lt_check) or relative to initial_instances."""
    new_instances = []
    old_instances = []
    # old instances are those that have the old launch template or version of the same launch template
    if lt_check:
        for i in props['instances']:
            # Check if migrating from launch_config_name to launch_template_name first
            if 'launch_config_name' in props['instance_facts'][i]:
                old_instances.append(i)
            elif props['instance_facts'][i]['launch_template'] == props['launch_template']:
                new_instances.append(i)
            else:
                old_instances.append(i)
    else:
        module.debug("Comparing initial instances with current: %s" % initial_instances)
        for i in props['instances']:
            if i not in initial_instances:
                new_instances.append(i)
            else:
                old_instances.append(i)

    module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
    module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))

    return new_instances, old_instances


def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
    """Return the subset of replace_instances that are actually in the ASG
    and carry a non-current launch config/template (or are in
    initial_instances when not checking launch objects)."""
    instances_to_terminate = []
    instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
    # check to make sure instances given are actually in the given ASG
    # and they have a non-current launch config
    if module.params.get('launch_config_name'):
        if lc_check:
            for i in instances:
                if 'launch_template' in props['instance_facts'][i]:
                    instances_to_terminate.append(i)
                elif props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
                    instances_to_terminate.append(i)
        else:
            for i in instances:
                if i in initial_instances:
                    instances_to_terminate.append(i)
    elif module.params.get('launch_template'):
        if lt_check:
            for i in instances:
                if 'launch_config_name' in props['instance_facts'][i]:
                    instances_to_terminate.append(i)
                elif props['instance_facts'][i]['launch_template'] != props['launch_template']:
                    instances_to_terminate.append(i)
        else:
            for i in instances:
                if i in initial_instances:
                    instances_to_terminate.append(i)
    return instances_to_terminate


def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
    """Deregister and terminate one batch of replaceable instances.

    Returns (break_loop, desired_size, instances_to_terminate) so the rolling
    loop in replace() knows whether it can stop early.
    """
    batch_size = module.params.get('replace_batch_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    group_name = module.params.get('name')
    lc_check = module.params.get('lc_check')
    lt_check = module.params.get('lt_check')
    decrement_capacity = False
    break_loop = False

    as_group = describe_autoscaling_groups(connection, group_name)[0]
    if desired_capacity is None:
        desired_capacity = as_group['DesiredCapacity']
    props = get_properties(as_group)
    desired_size = as_group['MinSize']
    if module.params.get('launch_config_name'):
        new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances)
    else:
        new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances)
    num_new_inst_needed = desired_capacity - len(new_instances)

    # check to make sure instances given are actually in the given ASG
    # and they have a non-current launch config
    instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances)

    module.debug("new instances needed: %s" % num_new_inst_needed)
    module.debug("new instances: %s" % new_instances)
    module.debug("old instances: %s" % old_instances)
    module.debug("batch instances: %s" % ",".join(instances_to_terminate))

    if num_new_inst_needed == 0:
        decrement_capacity = True
        if as_group['MinSize'] != min_size:
            if min_size is None:
                min_size = as_group['MinSize']
            updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
            update_asg(connection, **updated_params)
            module.debug("Updating minimum size back to original of %s" % min_size)
        # if there are some leftover old instances, but we are already at capacity with new ones
        # we don't want to decrement capacity
        if leftovers:
            decrement_capacity = False
        break_loop = True
        instances_to_terminate = old_instances
        desired_size = min_size
        module.debug("No new instances needed")

    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
        decrement_capacity = False
        break_loop = False
        module.debug("%s new instances needed" % num_new_inst_needed)

    module.debug("decrementing capacity: %s" % decrement_capacity)

    for instance_id in instances_to_terminate:
        elb_dreg(connection, group_name, instance_id)
        module.debug("terminating instance: %s" % instance_id)
        terminate_asg_instance(connection, instance_id, decrement_capacity)

    # we wait to make sure the machines we marked as Unhealthy are
    # no longer in the list
    return break_loop, desired_size, instances_to_terminate


def wait_for_term_inst(connection, term_instances):
    """Block until the given instances are no longer Terminating/Unhealthy in
    the ASG, or fail the module after wait_timeout."""
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    count = 1
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
        module.debug("waiting for instances to terminate")
        count = 0
        as_group = describe_autoscaling_groups(connection, group_name)[0]
        props = get_properties(as_group)
        instance_facts = props['instance_facts']
        instances = (i for i in instance_facts if i in term_instances)
        for i in instances:
            lifecycle = instance_facts[i]['lifecycle_state']
            health = instance_facts[i]['health_status']
            module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
            if lifecycle.startswith('Terminating') or health == 'Unhealthy':
                count += 1
        time.sleep(10)

    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())


def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
    """Block until the ASG property `prop` reaches desired_size, or fail the
    module after wait_timeout. Returns the latest properties dict."""
    # make sure we have the latest stats after that last loop.
    as_group = describe_autoscaling_groups(connection, group_name)[0]
    props = get_properties(as_group)
    module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
    # now we make sure that we have enough instances in a viable state
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and desired_size > props[prop]:
        module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
        time.sleep(10)
        as_group = describe_autoscaling_groups(connection, group_name)[0]
        props = get_properties(as_group)
    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
    module.debug("Reached %s: %s" % (prop, desired_size))
    return props


def asg_exists(connection):
    """Return True if an ASG with the module's 'name' parameter exists."""
    group_name = module.params.get('name')
    as_group = describe_autoscaling_groups(connection, group_name)
    return bool(len(as_group))


def main():
    """Module entry point: parse arguments, then create/update, delete, or
    roll-replace the autoscaling group as requested."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            load_balancers=dict(type='list'),
            target_group_arns=dict(type='list'),
            availability_zones=dict(type='list'),
            launch_config_name=dict(type='str'),
            launch_template=dict(type='dict',
                                 default=None,
                                 options=dict(
                                     version=dict(type='str'),
                                     launch_template_name=dict(type='str'),
                                     launch_template_id=dict(type='str'),
                                 ),
                                 ),
            min_size=dict(type='int'),
            max_size=dict(type='int'),
            placement_group=dict(type='str'),
            desired_capacity=dict(type='int'),
            vpc_zone_identifier=dict(type='list'),
            replace_batch_size=dict(type='int', default=1),
            replace_all_instances=dict(type='bool', default=False),
            replace_instances=dict(type='list', default=[]),
            lc_check=dict(type='bool', default=True),
            lt_check=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=300),
            state=dict(default='present', choices=['present', 'absent']),
            tags=dict(type='list', default=[]),
            health_check_period=dict(type='int', default=300),
            health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
            default_cooldown=dict(type='int', default=300),
            wait_for_instances=dict(type='bool', default=True),
            termination_policies=dict(type='list', default='Default'),
            notification_topic=dict(type='str', default=None),
            notification_types=dict(type='list', default=[
                'autoscaling:EC2_INSTANCE_LAUNCH',
                'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
                'autoscaling:EC2_INSTANCE_TERMINATE',
                'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
            ]),
            suspend_processes=dict(type='list', default=[]),
            metrics_collection=dict(type='bool', default=False),
            metrics_granularity=dict(type='str', default='1Minute'),
            metrics_list=dict(type='list', default=[
                'GroupMinSize',
                'GroupMaxSize',
                'GroupDesiredCapacity',
                'GroupInServiceInstances',
                'GroupPendingInstances',
                'GroupStandbyInstances',
                'GroupTerminatingInstances',
                'GroupTotalInstances'
            ])
        ),
    )

    global module
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['replace_all_instances', 'replace_instances'],
            ['launch_config_name', 'launch_template']]
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    state = module.params.get('state')
    replace_instances = module.params.get('replace_instances')
    replace_all_instances = module.params.get('replace_all_instances')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module,
                            conn_type='client',
                            resource='autoscaling',
                            region=region,
                            endpoint=ec2_url,
                            **aws_connect_params)
    changed = create_changed = replace_changed = False
    exists = asg_exists(connection)

    if state == 'present':
        create_changed, asg_properties = create_autoscaling_group(connection)
    elif state == 'absent':
        changed = delete_autoscaling_group(connection)
        module.exit_json(changed=changed)

    # Only replace instances if asg existed at start of call
    if exists and (replace_all_instances or replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')):
        replace_changed, asg_properties = replace(connection)

    if create_changed or replace_changed:
        changed = True

    module.exit_json(changed=changed, **asg_properties)


if __name__ == '__main__':
    main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,195
Setting ec2_asg `metrics_collection: yes` results in module always reporting modifications
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> Similar to #61085, there is a comparison bug which is encountered when the module attempts to compare the value of "EnabledMetrics". The ordering of the input list might not match the API response's ordering for the existing ASG, which will cause the module to believe the ASG has been modified, even if it hasn't. This bug is only encountered when `metrics_collection: yes`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION N/A ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Arch, Red Hat 7, CentOS 7 ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Set `metrics_collection: yes`, run task multiple times without any modification. Since the API response ordering does not match the default value, the resource will always appear as "modified". <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> If no changes are introduced, the task should not report any modifications. ##### ACTUAL RESULTS Ansible reports modifications to the ASGs.
https://github.com/ansible/ansible/issues/61195
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T19:34:22Z
python
2019-09-06T19:48:40Z
test/integration/targets/ec2_asg/tasks/main.yml
--- # tasks file for test_ec2_asg - name: Test incomplete credentials with ec2_asg block: # ============================================================ - name: test invalid profile ec2_asg: name: "{{ resource_prefix }}-asg" region: "{{ aws_region }}" profile: notavalidprofile ignore_errors: yes register: result - name: assert: that: - "'The config profile (notavalidprofile) could not be found' in result.msg" - name: test partial credentials ec2_asg: name: "{{ resource_prefix }}-asg" region: "{{ aws_region }}" aws_access_key: "{{ aws_access_key }}" ignore_errors: yes register: result - name: assert: that: - "'Partial credentials found in explicit, missing: aws_secret_access_key' in result.msg" - name: test without specifying region ec2_asg: name: "{{ resource_prefix }}-asg" aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" security_token: "{{ security_token | default(omit) }}" ignore_errors: yes register: result - name: assert: that: - result.msg == 'The ec2_asg module requires a region and none was found in configuration, environment variables or module parameters' # ============================================================ - name: Test incomplete arguments with ec2_asg block: # ============================================================ - name: test without specifying required module options ec2_asg: aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" security_token: "{{ security_token | default(omit) }}" ignore_errors: yes register: result - name: assert name is a required module option assert: that: - "result.msg == 'missing required arguments: name'" - name: Run ec2_asg integration tests. 
module_defaults: group/aws: aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" security_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: # ============================================================ - name: Find AMI to use ec2_ami_info: owners: 'amazon' filters: name: '{{ ec2_ami_name }}' register: ec2_amis - set_fact: ec2_ami_image: '{{ ec2_amis.images[0].image_id }}' - name: load balancer name has to be less than 32 characters # the 8 digit identifier at the end of resource_prefix helps determine during which test something # was created set_fact: load_balancer_name: "{{ item }}-lb" with_items: "{{ resource_prefix | regex_findall('.{8}$') }}" # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations - name: Create VPC for use in testing ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 tenancy: default register: testing_vpc - name: Create internet gateway for use in testing ec2_vpc_igw: vpc_id: "{{ testing_vpc.vpc.id }}" state: present register: igw - name: Create subnet for use in testing ec2_vpc_subnet: state: present vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 az: "{{ aws_region }}a" resource_tags: Name: "{{ resource_prefix }}-subnet" register: testing_subnet - name: create routing rules ec2_vpc_route_table: vpc_id: "{{ testing_vpc.vpc.id }}" tags: created: "{{ resource_prefix }}-route" routes: - dest: 0.0.0.0/0 gateway_id: "{{ igw.gateway_id }}" subnets: - "{{ testing_subnet.subnet.id }}" - name: create a security group with the vpc created in the ec2_setup ec2_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" rules: - proto: tcp from_port: 22 to_port: 22 cidr_ip: 0.0.0.0/0 - proto: tcp from_port: 80 to_port: 80 cidr_ip: 0.0.0.0/0 register: sg - name: ensure launch configs exist ec2_lc: name: "{{ item }}" assign_public_ip: true image_id: "{{ ec2_ami_image }}" 
user_data: | #cloud-config package_upgrade: true package_update: true packages: - httpd runcmd: - "service httpd start" security_groups: "{{ sg.group_id }}" instance_type: t3.micro with_items: - "{{ resource_prefix }}-lc" - "{{ resource_prefix }}-lc-2" # ============================================================ - name: launch asg and wait for instances to be deemed healthy (no ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 1 min_size: 1 max_size: 1 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" state: present wait_for_instances: yes register: output - assert: that: - "output.viable_instances == 1" # - name: pause for a bit to make sure that the group can't be trivially deleted # pause: seconds=30 - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # ============================================================ - name: launch asg and do not wait for instances to be deemed healthy (no ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 1 min_size: 1 max_size: 1 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: no state: present register: output - assert: that: - "output.viable_instances == 0" - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # ============================================================ - name: create asg with asg metrics enabled ec2_asg: name: "{{ resource_prefix }}-asg" metrics_collection: true launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 0 min_size: 0 max_size: 0 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" state: present register: output - assert: that: - "'Group' in output.metrics_collection.0.Metric" - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # 
============================================================ - name: launch load balancer ec2_elb_lb: name: "{{ load_balancer_name }}" state: present security_group_ids: - "{{ sg.group_id }}" subnets: "{{ testing_subnet.subnet.id }}" connection_draining_timeout: 60 listeners: - protocol: http load_balancer_port: 80 instance_port: 80 health_check: ping_protocol: tcp ping_port: 80 ping_path: "/" response_timeout: 5 interval: 10 unhealthy_threshold: 4 healthy_threshold: 2 register: load_balancer - name: launch asg and wait for instances to be deemed healthy (ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: ELB desired_capacity: 1 min_size: 1 max_size: 1 health_check_period: 300 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" load_balancers: "{{ load_balancer_name }}" wait_for_instances: yes wait_timeout: 900 state: present register: output - assert: that: - "output.viable_instances == 1" # ============================================================ # grow scaling group to 3 - name: add 2 more instances wait for instances to be deemed healthy (ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: ELB desired_capacity: 3 min_size: 3 max_size: 5 health_check_period: 600 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" load_balancers: "{{ load_balancer_name }}" wait_for_instances: yes wait_timeout: 1200 state: present register: output - assert: that: - "output.viable_instances == 3" # ============================================================ # # perform rolling replace with different launch configuration - name: perform rolling update to new AMI ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc-2" health_check_type: ELB desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: "{{ load_balancer_name }}" vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" 
wait_for_instances: yes replace_all_instances: yes wait_timeout: 1800 state: present register: output # ensure that all instances have new launch config - assert: that: - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'" with_dict: "{{ output.instance_facts }}" # assert they are all healthy and that the rolling update resulted in the appropriate number of instances - assert: that: - "output.viable_instances == 3" # ============================================================ # perform rolling replace with the original launch configuration - name: perform rolling update to new AMI while removing the load balancer ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: EC2 desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: [] vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: yes replace_all_instances: yes wait_timeout: 1800 state: present register: output # ensure that all instances have new launch config - assert: that: - "item.value.launch_config_name == '{{ resource_prefix }}-lc'" with_dict: "{{ output.instance_facts }}" # assert they are all healthy and that the rolling update resulted in the appropriate number of instances # there should be the same number of instances as there were before the rolling update was performed - assert: that: - "output.viable_instances == 3" # ============================================================ # perform rolling replace with new launch configuration and lc_check:false # Note - this is done async so we can query asg_facts during # the execution. Issues #28087 and #35993 result in correct # end result, but spin up extraneous instances during execution. 
- name: "perform rolling update to new AMI with lc_check: false" ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc-2" health_check_type: EC2 desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: [] vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: yes replace_all_instances: yes replace_batch_size: 3 lc_check: false wait_timeout: 1800 state: present async: 1800 poll: 0 register: asg_job - name: get ec2_asg facts for 3 minutes ec2_asg_info: name: "{{ resource_prefix }}-asg" register: output loop_control: pause: 15 with_sequence: count=12 - set_fact: inst_id_json_query: 'results[*].results[*].instances[*].instance_id' # Since we started with 3 servers and replace all of them. # We should see 6 servers total. - assert: that: - "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 6" - name: Ensure ec2_asg task completes async_status: jid="{{ asg_job.ansible_job_id }}" register: status until: status is finished retries: 200 delay: 15 # ============================================================ - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # Create new asg with replace_all_instances and lc_check:false # Note - this is done async so we can query asg_facts during # the execution. Issues #28087 results in correct # end result, but spin up extraneous instances during execution. 
- name: "new asg with lc_check: false" ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: EC2 desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: [] vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: yes replace_all_instances: yes replace_batch_size: 3 lc_check: false wait_timeout: 1800 state: present async: 1800 poll: 0 register: asg_job # Collect ec2_asg_info for 3 minutes - name: get ec2_asg information ec2_asg_info: name: "{{ resource_prefix }}-asg" register: output loop_control: pause: 15 with_sequence: count=12 - set_fact: inst_id_json_query: 'results[*].results[*].instances[*].instance_id' # Get all instance_ids we saw and assert we saw number expected # Should only see 3 (don't replace instances we just created) - assert: that: - "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 3" - name: Ensure ec2_asg task completes async_status: jid="{{ asg_job.ansible_job_id }}" register: status until: status is finished retries: 200 delay: 15 # ============================================================ always: - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10 # Remove the testing dependencies - name: remove the load balancer ec2_elb_lb: name: "{{ load_balancer_name }}" state: absent security_group_ids: - "{{ sg.group_id }}" subnets: "{{ testing_subnet.subnet.id }}" wait: yes connection_draining_timeout: 60 listeners: - protocol: http load_balancer_port: 80 instance_port: 80 health_check: ping_protocol: tcp ping_port: 80 ping_path: "/" response_timeout: 5 interval: 10 unhealthy_threshold: 4 healthy_threshold: 2 register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove launch configs ec2_lc: name: "{{ resource_prefix }}-lc" state: absent register: removed until: removed is not failed 
ignore_errors: yes retries: 10 with_items: - "{{ resource_prefix }}-lc" - "{{ resource_prefix }}-lc-2" - name: remove the security group ec2_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove routing rules ec2_vpc_route_table: state: absent vpc_id: "{{ testing_vpc.vpc.id }}" tags: created: "{{ resource_prefix }}-route" routes: - dest: 0.0.0.0/0 gateway_id: "{{ igw.gateway_id }}" subnets: - "{{ testing_subnet.subnet.id }}" register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove internet gateway ec2_vpc_igw: vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove the subnet ec2_vpc_subnet: state: absent vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove the VPC ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10
closed
ansible/ansible
https://github.com/ansible/ansible
61,085
ec2_asg tagging results in "modified" status if tags are not alphabetized
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY When the ec2_asg module attempts to evaluate whether tags for an ASG have changed, it compares the input list of tags (in the order given) to the API result (in the order returned by the API, A-Za-z). If the ordering is not the same (non-alphabetical input), the module believes that modifications have been made, and issues API calls to replace the tags. This is due to the [use](https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/cloud/amazon/ec2_asg.py#L1047) of `zip()` without respect to key ordering. This can be resolved by sorting the input dictionaries by key prior to `zip()`, or by switching to something [like](https://github.com/ansible/ansible/blob/73e171fd946b74089a99051858d8d49e561dea41/lib/ansible/module_utils/ec2.py#L706) `compare_aws_tags()` from `ansible.module_utils.ec2`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. 
--> N/A ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production Propagate_at_launch: no ``` This will always show modification, as the tags are listed in an order different than the API response. <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Tags are evaluated by matching keys, without regard to alphabetical input ordering ##### ACTUAL RESULTS Ansible shows modifications have occurred and updates the ASG's tags, even though no true modifications have been made.
https://github.com/ansible/ansible/issues/61085
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T02:48:10Z
python
2019-09-06T19:48:40Z
changelogs/fragments/61284-ec2_asg-idempotency.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,085
ec2_asg tagging results in "modified" status if tags are not alphabetized
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY When the ec2_asg module attempts to evaluate whether tags for an ASG have changed, it compares the input list of tags (in the order given) to the API result (in the order returned by the API, A-Za-z). If the ordering is not the same (non-alphabetical input), the module believes that modifications have been made, and issues API calls to replace the tags. This is due to the [use](https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/cloud/amazon/ec2_asg.py#L1047) of `zip()` without respect to key ordering. This can be resolved by sorting the input dictionaries by key prior to `zip()`, or by switching to something [like](https://github.com/ansible/ansible/blob/73e171fd946b74089a99051858d8d49e561dea41/lib/ansible/module_utils/ec2.py#L706) `compare_aws_tags()` from `ansible.module_utils.ec2`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. 
--> N/A ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production Propagate_at_launch: no ``` This will always show modification, as the tags are listed in an order different than the API response. <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Tags are evaluated by matching keys, without regard to alphabetical input ordering ##### ACTUAL RESULTS Ansible shows modifications have occurred and updates the ASG's tags, even though no true modifications have been made.
https://github.com/ansible/ansible/issues/61085
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T02:48:10Z
python
2019-09-06T19:48:40Z
hacking/aws_config/testing_policies/compute-policy.json
{# Not all Autoscaling API Actions allow specified resources #} {# See http://docs.aws.amazon.com/autoscaling/latest/userguide/control-access-using-iam.html#policy-auto-scaling-resources #} { "Version": "2012-10-17", "Statement": [ { "Sid": "DescribeAutoscaling", "Effect": "Allow", "Action": [ "autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeLaunchConfigurations", "autoscaling:DescribePolicies" ], "Resource": "*" }, { "Sid": "AllowAutoscaling", "Effect": "Allow", "Action": [ "autoscaling:*LaunchConfiguration", "autoscaling:*AutoScalingGroup", "autoscaling:*MetricsCollection", "autoscaling:PutScalingPolicy", "autoscaling:DeletePolicy" ], "Resource": [ "arn:aws:autoscaling:{{aws_region}}:{{aws_account}}:*" ] }, {# Note that not all EC2 API Actions allow a specific resource #} {# See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ec2-api-permissions.html#ec2-api-unsupported-resource-permissions #} { "Sid": "AllowUnspecifiedEC2Resource", "Effect": "Allow", "Action": [ "ec2:*LaunchTemplate", "ec2:*LaunchTemplateVersion", "ec2:*LaunchTemplateVersions", "ec2:AllocateAddress", "ec2:AssociateAddress", "ec2:AssociateDhcpOptions", "ec2:AssociateRouteTable", "ec2:AssociateVpcCidrBlock", "ec2:AssociateSubnetCidrBlock", "ec2:AttachInternetGateway", "ec2:AttachNetworkInterface", "ec2:AttachVolume", "ec2:AttachVpnGateway", "ec2:CreateCustomerGateway", "ec2:CreateDhcpOptions", "ec2:CreateImage", "ec2:CreateInternetGateway", "ec2:CreateKeyPair", "ec2:CreateNatGateway", "ec2:CreateNetworkInterface", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", "ec2:CreateSnapshot", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVpc", "ec2:CreateVpnConnection", "ec2:CreateVpnGateway", "ec2:DeleteCustomerGateway", "ec2:DeleteDhcpOptions", "ec2:DeleteInternetGateway", "ec2:DeleteKeyPair", "ec2:DeleteNatGateway", "ec2:DeleteNetworkInterface", "ec2:DeleteRoute", "ec2:DeleteRouteTable", "ec2:DeleteSnapshot", "ec2:DeleteSubnet", "ec2:DeleteTags", 
"ec2:DeleteVpc", "ec2:DeleteVpnConnection", "ec2:DeleteVpnGateway", "ec2:DeregisterImage", "ec2:DetachInternetGateway", "ec2:DetachVpnGateway", "ec2:Describe*", "ec2:DisassociateAddress", "ec2:DisassociateRouteTable", "ec2:DisassociateSubnetCidrBlock", "ec2:ImportKeyPair", "ec2:ModifyImageAttribute", "ec2:ModifyInstanceAttribute", "ec2:ModifySubnetAttribute", "ec2:ModifyVpcAttribute", "ec2:RegisterImage", "ec2:ReleaseAddress", "ec2:ReplaceRouteTableAssociation", "ec2:ReplaceIamInstanceProfileAssociation", "ec2:ReportInstanceStatus" ], "Resource": "*" }, { "Sid": "AllowSpecifiedEC2Resource", "Effect": "Allow", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:AuthorizeSecurityGroupEgress", "ec2:CreateTags", "ec2:CreateVolume", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", "ec2:DeleteVolume", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "ec2:RunInstances", "ec2:StartInstances", "ec2:StopInstances", "ec2:TerminateInstances", "ec2:UpdateSecurityGroupRuleDescriptionsIngress", "ec2:UpdateSecurityGroupRuleDescriptionsEgress" ], "Resource": [ "arn:aws:ec2:{{aws_region}}::image/*", "arn:aws:ec2:{{aws_region}}:{{aws_account}}:*" ] }, {# According to http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/load-balancer-authentication-access-control.html #} {# Resource level access control is not possible for the new ELB API (providing Application Load Balancer functionality #} {# While it remains possible for the old API, there is no distinction of the Actions between old API and new API #} { "Sid": "AllowLoadBalancerOperations", "Effect": "Allow", "Action": [ "elasticloadbalancing:AddTags", "elasticloadbalancing:ConfigureHealthCheck", "elasticloadbalancing:CreateListener", "elasticloadbalancing:CreateLoadBalancer", "elasticloadbalancing:CreateLoadBalancerListeners", "elasticloadbalancing:CreateRule", "elasticloadbalancing:CreateTargetGroup", "elasticloadbalancing:DeleteListener", "elasticloadbalancing:DeleteLoadBalancer", 
"elasticloadbalancing:DeleteLoadBalancerListeners", "elasticloadbalancing:DeleteRule", "elasticloadbalancing:DeleteTargetGroup", "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancer*", "elasticloadbalancing:DescribeTags", "elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:ModifyListener", "elasticloadbalancing:ModifyLoadBalancerAttributes", "elasticloadbalancing:ModifyRule", "elasticloadbalancing:RegisterInstancesWithLoadBalancer", "elasticloadbalancing:RemoveTags" ], "Resource": "*" }, {# Only certain lambda actions can be restricted to a specific resource #} {# http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html #} { "Sid": "AllowApiGateway", "Effect": "Allow", "Action": [ "apigateway:*" ], "Resource": [ "arn:aws:apigateway:{{aws_region}}::/*" ] }, { "Sid": "AllowGetUserForLambdaCreation", "Effect": "Allow", "Action": [ "iam:GetUser" ], "Resource": [ "arn:aws:iam::{{aws_account}}:user/ansible_integration_tests" ] }, { "Sid": "AllowLambdaManagementWithoutResource", "Effect": "Allow", "Action": [ "lambda:CreateEventSourceMapping", "lambda:GetAccountSettings", "lambda:GetEventSourceMapping", "lambda:List*", "lambda:TagResource", "lambda:UntagResource" ], "Resource": "*" }, { "Sid": "AllowLambdaManagementWithResource", "Effect": "Allow", "Action": [ "lambda:AddPermission", "lambda:CreateAlias", "lambda:CreateFunction", "lambda:DeleteAlias", "lambda:DeleteFunction", "lambda:GetAlias", "lambda:GetFunction", "lambda:GetFunctionConfiguration", "lambda:GetPolicy", "lambda:InvokeFunction", "lambda:PublishVersion", "lambda:RemovePermission", "lambda:UpdateAlias", "lambda:UpdateEventSourceMapping", "lambda:UpdateFunctionCode", "lambda:UpdateFunctionConfiguration" ], "Resource": "arn:aws:lambda:{{aws_region}}:{{aws_account}}:function:*" }, { "Sid": 
"AllowRoleManagement", "Effect": "Allow", "Action": [ "iam:PassRole" ], "Resource": [ "arn:aws:iam::{{aws_account}}:role/ansible_lambda_role", "arn:aws:iam::{{aws_account}}:role/ecsInstanceRole", "arn:aws:iam::{{aws_account}}:role/ec2InstanceRole", "arn:aws:iam::{{aws_account}}:role/ecsServiceRole", "arn:aws:iam::{{aws_account}}:role/aws_eks_cluster_role", "arn:aws:iam::{{aws_account}}:role/ecsTaskExecutionRole" ] }, { "Sid": "AllowSESManagement", "Effect": "Allow", "Action": [ "ses:VerifyEmailIdentity", "ses:DeleteIdentity", "ses:GetIdentityVerificationAttributes", "ses:GetIdentityNotificationAttributes", "ses:VerifyDomainIdentity", "ses:SetIdentityNotificationTopic", "ses:SetIdentityHeadersInNotificationsEnabled", "ses:SetIdentityFeedbackForwardingEnabled", "ses:GetIdentityPolicies", "ses:PutIdentityPolicy", "ses:DeleteIdentityPolicy", "ses:ListIdentityPolicies", "ses:SetIdentityFeedbackForwardingEnabled", "ses:ListReceiptRuleSets", "ses:DescribeReceiptRuleSet", "ses:DescribeActiveReceiptRuleSet", "ses:SetActiveReceiptRuleSet", "ses:CreateReceiptRuleSet", "ses:DeleteReceiptRuleSet" ], "Resource": [ "*" ] }, { "Sid": "AllowSNSManagement", "Effect": "Allow", "Action": [ "SNS:CreateTopic", "SNS:DeleteTopic", "SNS:GetTopicAttributes", "SNS:ListSubscriptions", "SNS:ListSubscriptionsByTopic", "SNS:ListTopics", "SNS:SetTopicAttributes", "SNS:Subscribe", "SNS:Unsubscribe" ], "Resource": [ "*" ] } ] }
closed
ansible/ansible
https://github.com/ansible/ansible
61,085
ec2_asg tagging results in "modified" status if tags are not alphabetized
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY When the ec2_asg module attempts to evaluate whether tags for an ASG have changed, it compares the input list of tags (in the order given) to the API result (in the order returned by the API, A-Za-z). If the ordering is not the same (non-alphabetical input), the module believes that modifications have been made, and issues API calls to replace the tags. This is due to the [use](https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/cloud/amazon/ec2_asg.py#L1047) of `zip()` without respect to key ordering. This can be resolved by sorting the input dictionaries by key prior to `zip()`, or by switching to something [like](https://github.com/ansible/ansible/blob/73e171fd946b74089a99051858d8d49e561dea41/lib/ansible/module_utils/ec2.py#L706) `compare_aws_tags()` from `ansible.module_utils.ec2`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. 
--> N/A ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production Propagate_at_launch: no ``` This will always show modification, as the tags are listed in an order different than the API response. <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Tags are evaluated by matching keys, without regard to alphabetical input ordering ##### ACTUAL RESULTS Ansible shows modifications have occurred and updates the ASG's tags, even though no true modifications have been made.
https://github.com/ansible/ansible/issues/61085
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T02:48:10Z
python
2019-09-06T19:48:40Z
lib/ansible/modules/cloud/amazon/ec2_asg.py
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: ec2_asg short_description: Create or delete AWS Autoscaling Groups description: - Can create or delete AWS Autoscaling Groups - Can be used with the ec2_lc module to manage Launch Configurations version_added: "1.6" author: "Gareth Rushgrove (@garethr)" requirements: [ "boto3", "botocore" ] options: state: description: - register or deregister the instance choices: ['present', 'absent'] default: present name: description: - Unique name for group to be created or deleted required: true load_balancers: description: - List of ELB names to use for the group. Use for classic load balancers. target_group_arns: description: - List of target group ARNs to use for the group. Use for application load balancers. version_added: "2.4" availability_zones: description: - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set. launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. If unspecified then the current group value will be used. One of launch_config_name or launch_template must be provided. 
launch_template: description: - Dictionary describing the Launch Template to use suboptions: version: description: - The version number of the launch template to use. Defaults to latest version if not provided. default: "latest" launch_template_name: description: - The name of the launch template. Only one of launch_template_name or launch_template_id is required. launch_template_id: description: - The id of the launch template. Only one of launch_template_name or launch_template_id is required. version_added: "2.8" min_size: description: - Minimum number of instances in group, if unspecified then the current group value will be used. max_size: description: - Maximum number of instances in group, if unspecified then the current group value will be used. placement_group: description: - Physical location of your cluster placement group created in Amazon EC2. version_added: "2.3" desired_capacity: description: - Desired number of instances in group, if unspecified then the current group value will be used. replace_all_instances: description: - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration. It increases the ASG size by C(replace_batch_size), waits for the new instances to be up and running. After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced. Once that's done the ASG size is reduced back to the expected size. version_added: "1.8" default: 'no' type: bool replace_batch_size: description: - Number of instances you'd like to replace at a time. Used with replace_all_instances. required: false version_added: "1.8" default: 1 replace_instances: description: - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. 
version_added: "1.8" lc_check: description: - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config. version_added: "1.8" default: 'yes' type: bool lt_check: description: - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_template or launch_template version. version_added: "2.8" default: 'yes' type: bool vpc_zone_identifier: description: - List of VPC subnets to use tags: description: - A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true. version_added: "1.7" health_check_period: description: - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. required: false default: 300 seconds version_added: "1.7" health_check_type: description: - The service you want the health status from, Amazon EC2 or Elastic Load Balancer. required: false default: EC2 version_added: "1.7" choices: ['EC2', 'ELB'] default_cooldown: description: - The number of seconds after a scaling activity completes before another can begin. default: 300 seconds version_added: "2.0" wait_timeout: description: - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy", try increasing this value. default: 300 version_added: "1.8" wait_for_instances: description: - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy". version_added: "1.9" default: 'yes' type: bool termination_policies: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. 
- For 'Default', when used to create a new autoscaling group, the "Default"i value is used. When used to change an existent autoscaling group, the current termination policies are maintained. default: Default choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" notification_topic: description: - A SNS topic ARN to send auto scaling notifications to. version_added: "2.2" notification_types: description: - A list of auto scaling events to trigger notifications on. default: - 'autoscaling:EC2_INSTANCE_LAUNCH' - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR' - 'autoscaling:EC2_INSTANCE_TERMINATE' - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' required: false version_added: "2.2" suspend_processes: description: - A list of scaling processes to suspend. default: [] choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer'] version_added: "2.3" metrics_collection: description: - Enable ASG metrics collection type: bool default: 'no' version_added: "2.6" metrics_granularity: description: - When metrics_collection is enabled this will determine granularity of metrics collected by CloudWatch default: "1minute" version_added: "2.6" metrics_list: description: - List of autoscaling metrics to collect when enabling metrics_collection default: - 'GroupMinSize' - 'GroupMaxSize' - 'GroupDesiredCapacity' - 'GroupInServiceInstances' - 'GroupPendingInstances' - 'GroupStandbyInstances' - 'GroupTerminatingInstances' - 'GroupTotalInstances' version_added: "2.6" extends_documentation_fragment: - aws - ec2 """ EXAMPLES = ''' # Basic configuration with Launch Configuration - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: 
production propagate_at_launch: no # Rolling ASG Updates # Below is an example of how to assign a new launch config to an ASG and terminate old instances. # # All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in # a rolling fashion with instances using the current launch configuration, "my_new_lc". # # This could also be considered a rolling deploy of a pre-baked AMI. # # If this is a newly created group, the instances will not be replaced since all instances # will have the current launch configuration. - name: create launch config ec2_lc: name: my_new_lc image_id: ami-lkajsf key_name: mykey region: us-east-1 security_groups: sg-23423 instance_type: m1.small assign_public_ip: yes - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_all_instances: yes min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 # To only replace a couple of instances instead of all of them, supply a list # to "replace_instances": - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_instances: - i-b345231 - i-24c2931 min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 # Basic Configuration with Launch Template - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_template: version: '1' launch_template_name: 'lt-example' launch_template_id: 'lt-123456' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production propagate_at_launch: no ''' RETURN = ''' --- auto_scaling_group_name: description: The unique name of the auto scaling group returned: success type: str sample: "myasg" auto_scaling_group_arn: description: The unique ARN of the autoscaling group returned: success type: str sample: 
"arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg" availability_zones: description: The availability zones for the auto scaling group returned: success type: list sample: [ "us-east-1d" ] created_time: description: Timestamp of create time of the auto scaling group returned: success type: str sample: "2017-11-08T14:41:48.272000+00:00" default_cooldown: description: The default cooldown time in seconds. returned: success type: int sample: 300 desired_capacity: description: The number of EC2 instances that should be running in this group. returned: success type: int sample: 3 healthcheck_period: description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. returned: success type: int sample: 30 healthcheck_type: description: The service you want the health status from, one of "EC2" or "ELB". returned: success type: str sample: "ELB" healthy_instances: description: Number of instances in a healthy state returned: success type: int sample: 5 in_service_instances: description: Number of instances in service returned: success type: int sample: 3 instance_facts: description: Dictionary of EC2 instances and their status as it relates to the ASG. returned: success type: dict sample: { "i-0123456789012": { "health_status": "Healthy", "launch_config_name": "public-webapp-production-1", "lifecycle_state": "InService" } } instances: description: list of instance IDs in the ASG returned: success type: list sample: [ "i-0123456789012" ] launch_config_name: description: > Name of launch configuration associated with the ASG. Same as launch_configuration_name, provided for compatibility with ec2_asg module. returned: success type: str sample: "public-webapp-production-1" load_balancers: description: List of load balancers names attached to the ASG. 
returned: success type: list sample: ["elb-webapp-prod"] max_size: description: Maximum size of group returned: success type: int sample: 3 min_size: description: Minimum size of group returned: success type: int sample: 1 pending_instances: description: Number of instances in pending state returned: success type: int sample: 1 tags: description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. returned: success type: list sample: [ { "key": "Name", "value": "public-webapp-production-1", "resource_id": "public-webapp-production-1", "resource_type": "auto-scaling-group", "propagate_at_launch": "true" }, { "key": "env", "value": "production", "resource_id": "public-webapp-production-1", "resource_type": "auto-scaling-group", "propagate_at_launch": "true" } ] target_group_arns: description: List of ARNs of the target groups that the ASG populates returned: success type: list sample: [ "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b", "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234" ] target_group_names: description: List of names of the target groups that the ASG populates returned: success type: list sample: [ "target-group-host-hello", "target-group-path-world" ] termination_policies: description: A list of termination policies for the group. 
returned: success type: str sample: ["Default"] unhealthy_instances: description: Number of instances in an unhealthy state returned: success type: int sample: 0 viable_instances: description: Number of instances in a viable state returned: success type: int sample: 1 vpc_zone_identifier: description: VPC zone ID / subnet id for the auto scaling group returned: success type: str sample: "subnet-a31ef45f" metrics_collection: description: List of enabled AutosSalingGroup metrics returned: success type: list sample: [ { "Granularity": "1Minute", "Metric": "GroupInServiceInstances" } ] ''' import time import traceback from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry try: import botocore except ImportError: pass # will be detected by imported HAS_BOTO3 ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', 'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies', 'VPCZoneIdentifier') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') backoff_params = dict(tries=10, delay=3, backoff=1.5) @AWSRetry.backoff(**backoff_params) def describe_autoscaling_groups(connection, group_name): pg = connection.get_paginator('describe_auto_scaling_groups') return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', []) @AWSRetry.backoff(**backoff_params) def deregister_lb_instances(connection, lb_name, instance_id): connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)]) @AWSRetry.backoff(**backoff_params) def describe_instance_health(connection, lb_name, instances): params = dict(LoadBalancerName=lb_name) if 
instances: params.update(Instances=instances) return connection.describe_instance_health(**params) @AWSRetry.backoff(**backoff_params) def describe_target_health(connection, target_group_arn, instances): return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances) @AWSRetry.backoff(**backoff_params) def suspend_asg_processes(connection, asg_name, processes): connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) @AWSRetry.backoff(**backoff_params) def resume_asg_processes(connection, asg_name, processes): connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes) @AWSRetry.backoff(**backoff_params) def describe_launch_configurations(connection, launch_config_name): pg = connection.get_paginator('describe_launch_configurations') return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() @AWSRetry.backoff(**backoff_params) def describe_launch_templates(connection, launch_template): if launch_template['launch_template_id'] is not None: try: lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) return lt except (botocore.exceptions.ClientError) as e: module.fail_json(msg="No launch template found matching: %s" % launch_template) else: try: lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) return lt except (botocore.exceptions.ClientError) as e: module.fail_json(msg="No launch template found matching: %s" % launch_template) @AWSRetry.backoff(**backoff_params) def create_asg(connection, **params): connection.create_auto_scaling_group(**params) @AWSRetry.backoff(**backoff_params) def put_notification_config(connection, asg_name, topic_arn, notification_types): connection.put_notification_configuration( AutoScalingGroupName=asg_name, TopicARN=topic_arn, NotificationTypes=notification_types ) @AWSRetry.backoff(**backoff_params) def 
del_notification_config(connection, asg_name, topic_arn): connection.delete_notification_configuration( AutoScalingGroupName=asg_name, TopicARN=topic_arn ) @AWSRetry.backoff(**backoff_params) def attach_load_balancers(connection, asg_name, load_balancers): connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) @AWSRetry.backoff(**backoff_params) def detach_load_balancers(connection, asg_name, load_balancers): connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers) @AWSRetry.backoff(**backoff_params) def attach_lb_target_groups(connection, asg_name, target_group_arns): connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) @AWSRetry.backoff(**backoff_params) def detach_lb_target_groups(connection, asg_name, target_group_arns): connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns) @AWSRetry.backoff(**backoff_params) def update_asg(connection, **params): connection.update_auto_scaling_group(**params) @AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) def delete_asg(connection, asg_name, force_delete): connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) @AWSRetry.backoff(**backoff_params) def terminate_asg_instance(connection, instance_id, decrement_capacity): connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, ShouldDecrementDesiredCapacity=decrement_capacity) def enforce_required_arguments_for_create(): ''' As many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module, so we enforce them here ''' missing_args = [] if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group 
create") for arg in ('min_size', 'max_size'): if module.params[arg] is None: missing_args.append(arg) if missing_args: module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) def get_properties(autoscaling_group): properties = dict() properties['healthy_instances'] = 0 properties['in_service_instances'] = 0 properties['unhealthy_instances'] = 0 properties['pending_instances'] = 0 properties['viable_instances'] = 0 properties['terminating_instances'] = 0 instance_facts = dict() autoscaling_group_instances = autoscaling_group.get('Instances') if autoscaling_group_instances: properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] for i in autoscaling_group_instances: if i.get('LaunchConfigurationName'): instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'], 'lifecycle_state': i['LifecycleState'], 'launch_config_name': i['LaunchConfigurationName']} elif i.get('LaunchTemplate'): instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'], 'lifecycle_state': i['LifecycleState'], 'launch_template': i['LaunchTemplate']} else: instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'], 'lifecycle_state': i['LifecycleState']} if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService': properties['viable_instances'] += 1 if i['HealthStatus'] == 'Healthy': properties['healthy_instances'] += 1 else: properties['unhealthy_instances'] += 1 if i['LifecycleState'] == 'InService': properties['in_service_instances'] += 1 if i['LifecycleState'] == 'Terminating': properties['terminating_instances'] += 1 if i['LifecycleState'] == 'Pending': properties['pending_instances'] += 1 else: properties['instances'] = [] properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName') properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN') properties['availability_zones'] = autoscaling_group.get('AvailabilityZones') 
properties['created_time'] = autoscaling_group.get('CreatedTime') properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames') if autoscaling_group.get('LaunchConfigurationName'): properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName') else: properties['launch_template'] = autoscaling_group.get('LaunchTemplate') properties['tags'] = autoscaling_group.get('Tags') properties['min_size'] = autoscaling_group.get('MinSize') properties['max_size'] = autoscaling_group.get('MaxSize') properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity') properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod') properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType') properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') properties['termination_policies'] = autoscaling_group.get('TerminationPolicies') properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs') properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier') properties['metrics_collection'] = autoscaling_group.get('EnabledMetrics') if properties['target_group_arns']: region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) elbv2_connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) tg_paginator = elbv2_connection.get_paginator('describe_target_groups') tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result() target_groups = tg_result['TargetGroups'] else: target_groups = [] properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups] return properties def get_launch_object(connection, ec2_connection): launch_object = dict() launch_config_name = module.params.get('launch_config_name') 
launch_template = module.params.get('launch_template') if launch_config_name is None and launch_template is None: return launch_object elif launch_config_name: try: launch_configs = describe_launch_configurations(connection, launch_config_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to describe launch configurations", exception=traceback.format_exc()) if len(launch_configs['LaunchConfigurations']) == 0: module.fail_json(msg="No launch config found with name %s" % launch_config_name) launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} return launch_object elif launch_template: lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0] if launch_template['version'] is not None: launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}} else: launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}} return launch_object def elb_dreg(asg_connection, group_name, instance_id): region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) as_group = describe_autoscaling_groups(asg_connection, group_name)[0] wait_timeout = module.params.get('wait_timeout') count = 1 if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB': elb_connection = boto3_conn(module, conn_type='client', resource='elb', region=region, endpoint=ec2_url, **aws_connect_params) else: return for lb in as_group['LoadBalancerNames']: deregister_lb_instances(elb_connection, lb, instance_id) module.debug("De-registering %s from ELB %s" % (instance_id, lb)) wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: count = 0 for lb in as_group['LoadBalancerNames']: lb_instances = describe_instance_health(elb_connection, lb, []) for i in 
lb_instances['InstanceStates']: if i['InstanceId'] == instance_id and i['State'] == "InService": count += 1 module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description'])) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime())) def elb_healthy(asg_connection, elb_connection, group_name): healthy_instances = set() as_group = describe_autoscaling_groups(asg_connection, group_name)[0] props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] for instance, settings in props['instance_facts'].items(): if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': instances.append(dict(InstanceId=instance)) module.debug("ASG considers the following instances InService and Healthy: %s" % instances) module.debug("ELB instance status:") lb_instances = list() for lb in as_group.get('LoadBalancerNames'): # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: lb_instances = describe_instance_health(elb_connection, lb, instances) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidInstance': return None module.fail_json(msg="Failed to get load balancer.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to get load balancer.", exception=traceback.format_exc()) for i in lb_instances.get('InstanceStates'): if i['State'] == "InService": healthy_instances.add(i['InstanceId']) module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State'])) return len(healthy_instances) def tg_healthy(asg_connection, elbv2_connection, group_name): healthy_instances = set() as_group = describe_autoscaling_groups(asg_connection, group_name)[0] props = get_properties(as_group) # get healthy, inservice instances 
from ASG instances = [] for instance, settings in props['instance_facts'].items(): if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': instances.append(dict(Id=instance)) module.debug("ASG considers the following instances InService and Healthy: %s" % instances) module.debug("Target Group instance status:") tg_instances = list() for tg in as_group.get('TargetGroupARNs'): # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: tg_instances = describe_target_health(elbv2_connection, tg, instances) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidInstance': return None module.fail_json(msg="Failed to get target group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to get target group.", exception=traceback.format_exc()) for i in tg_instances.get('TargetHealthDescriptions'): if i['TargetHealth']['State'] == "healthy": healthy_instances.add(i['Target']['Id']) module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State'])) return len(healthy_instances) def wait_for_elb(asg_connection, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) wait_timeout = module.params.get('wait_timeout') # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = describe_autoscaling_groups(asg_connection, group_name)[0] if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB': module.debug("Waiting for ELB to consider instances healthy.") elb_connection = boto3_conn(module, conn_type='client', resource='elb', region=region, endpoint=ec2_url, **aws_connect_params) wait_timeout = time.time() + wait_timeout healthy_instances = 
elb_healthy(asg_connection, elb_connection, group_name) while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) module.debug("ELB thinks %s instances are healthy." % healthy_instances) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances) def wait_for_target_group(asg_connection, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) wait_timeout = module.params.get('wait_timeout') # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = describe_autoscaling_groups(asg_connection, group_name)[0] if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB': module.debug("Waiting for Target Group to consider instances healthy.") elbv2_connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) wait_timeout = time.time() + wait_timeout healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) module.debug("Target Group thinks %s instances are healthy." % healthy_instances) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) module.debug("Waiting complete. Target Group thinks %s instances are healthy." 
% healthy_instances) def suspend_processes(ec2_connection, as_group): suspend_processes = set(module.params.get('suspend_processes')) try: suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']]) except AttributeError: # New ASG being created, no suspended_processes defined yet suspended_processes = set() if suspend_processes == suspended_processes: return False resume_processes = list(suspended_processes - suspend_processes) if resume_processes: resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes) if suspend_processes: suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes)) return True def create_autoscaling_group(connection): group_name = module.params.get('name') load_balancers = module.params['load_balancers'] target_group_arns = module.params['target_group_arns'] availability_zones = module.params['availability_zones'] launch_config_name = module.params.get('launch_config_name') launch_template = module.params.get('launch_template') min_size = module.params['min_size'] max_size = module.params['max_size'] placement_group = module.params.get('placement_group') desired_capacity = module.params.get('desired_capacity') vpc_zone_identifier = module.params.get('vpc_zone_identifier') set_tags = module.params.get('tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') default_cooldown = module.params.get('default_cooldown') wait_for_instances = module.params.get('wait_for_instances') wait_timeout = module.params.get('wait_timeout') termination_policies = module.params.get('termination_policies') notification_topic = module.params.get('notification_topic') notification_types = module.params.get('notification_types') metrics_collection = module.params.get('metrics_collection') metrics_granularity = module.params.get('metrics_granularity') metrics_list = module.params.get('metrics_list') try: as_groups = 
describe_autoscaling_groups(connection, group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to describe auto scaling groups.", exception=traceback.format_exc()) region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) ec2_connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) if vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: for k, v in tag.items(): if k != 'propagate_at_launch': asg_tags.append(dict(Key=k, Value=to_native(v), PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)), ResourceType='auto-scaling-group', ResourceId=group_name)) if not as_groups: if not vpc_zone_identifier and not availability_zones: availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] enforce_required_arguments_for_create() if desired_capacity is None: desired_capacity = min_size ag = dict( AutoScalingGroupName=group_name, MinSize=min_size, MaxSize=max_size, DesiredCapacity=desired_capacity, Tags=asg_tags, HealthCheckGracePeriod=health_check_period, HealthCheckType=health_check_type, DefaultCooldown=default_cooldown, TerminationPolicies=termination_policies) if vpc_zone_identifier: ag['VPCZoneIdentifier'] = vpc_zone_identifier if availability_zones: ag['AvailabilityZones'] = availability_zones if placement_group: ag['PlacementGroup'] = placement_group if load_balancers: ag['LoadBalancerNames'] = load_balancers if target_group_arns: ag['TargetGroupARNs'] = target_group_arns launch_object = get_launch_object(connection, ec2_connection) if 'LaunchConfigurationName' in launch_object: ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] elif 'LaunchTemplate' in launch_object: ag['LaunchTemplate'] = launch_object['LaunchTemplate'] else: 
module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate", exception=traceback.format_exc()) try: create_asg(connection, **ag) if metrics_collection: connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) all_ag = describe_autoscaling_groups(connection, group_name) if len(all_ag) == 0: module.fail_json(msg="No auto scaling group found with the name %s" % group_name) as_group = all_ag[0] suspend_processes(connection, as_group) if wait_for_instances: wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') if load_balancers: wait_for_elb(connection, group_name) # Wait for target group health if target group(s)defined if target_group_arns: wait_for_target_group(connection, group_name) if notification_topic: put_notification_config(connection, group_name, notification_topic, notification_types) as_group = describe_autoscaling_groups(connection, group_name)[0] asg_properties = get_properties(as_group) changed = True return changed, asg_properties except botocore.exceptions.ClientError as e: module.fail_json(msg="Failed to create Autoscaling Group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to create Autoscaling Group.", exception=traceback.format_exc()) else: as_group = as_groups[0] initial_asg_properties = get_properties(as_group) changed = False if suspend_processes(connection, as_group): changed = True # process tag changes if len(set_tags) > 0: have_tags = as_group.get('Tags') want_tags = asg_tags dead_tags = [] have_tag_keyvals = [x['Key'] for x in have_tags] want_tag_keyvals = [x['Key'] for x in want_tags] for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals): changed = True dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag)) have_tags = [have_tag for have_tag 
in have_tags if have_tag['Key'] != dead_tag] if dead_tags: connection.delete_tags(Tags=dead_tags) zipped = zip(have_tags, want_tags) if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped): changed = True connection.create_or_update_tags(Tags=asg_tags) # Handle load balancer attachments/detachments # Attach load balancers if they are specified but none currently exist if load_balancers and not as_group['LoadBalancerNames']: changed = True try: attach_load_balancers(connection, group_name, load_balancers) except botocore.exceptions.ClientError as e: module.fail_json(msg="Failed to update Autoscaling Group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to update Autoscaling Group.", exception=traceback.format_exc()) # Update load balancers if they are specified and one or more already exists elif as_group['LoadBalancerNames']: change_load_balancers = load_balancers is not None # Get differences if not load_balancers: load_balancers = list() wanted_elbs = set(load_balancers) has_elbs = set(as_group['LoadBalancerNames']) # check if all requested are already existing if has_elbs - wanted_elbs and change_load_balancers: # if wanted contains less than existing, then we need to delete some elbs_to_detach = has_elbs.difference(wanted_elbs) if elbs_to_detach: changed = True try: detach_load_balancers(connection, group_name, list(elbs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to detach load balancers %s: %s." 
% (elbs_to_detach, to_native(e)), exception=traceback.format_exc()) if wanted_elbs - has_elbs: # if has contains less than wanted, then we need to add some elbs_to_attach = wanted_elbs.difference(has_elbs) if elbs_to_attach: changed = True try: attach_load_balancers(connection, group_name, list(elbs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)), exception=traceback.format_exc()) # Handle target group attachments/detachments # Attach target groups if they are specified but none currently exist if target_group_arns and not as_group['TargetGroupARNs']: changed = True try: attach_lb_target_groups(connection, group_name, target_group_arns) except botocore.exceptions.ClientError as e: module.fail_json(msg="Failed to update Autoscaling Group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to update Autoscaling Group.", exception=traceback.format_exc()) # Update target groups if they are specified and one or more already exists elif target_group_arns is not None and as_group['TargetGroupARNs']: # Get differences wanted_tgs = set(target_group_arns) has_tgs = set(as_group['TargetGroupARNs']) # check if all requested are already existing if has_tgs.issuperset(wanted_tgs): # if wanted contains less than existing, then we need to delete some tgs_to_detach = has_tgs.difference(wanted_tgs) if tgs_to_detach: changed = True try: detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)), exception=traceback.format_exc()) if wanted_tgs.issuperset(has_tgs): # if has contains less than wanted, then we need to add some tgs_to_attach = 
wanted_tgs.difference(has_tgs) if tgs_to_attach: changed = True try: attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)), exception=traceback.format_exc()) # check for attributes that aren't required for updating an existing ASG # check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: min_size = as_group['MinSize'] if max_size is None: max_size = as_group['MaxSize'] if desired_capacity is None: desired_capacity = as_group['DesiredCapacity'] ag = dict( AutoScalingGroupName=group_name, MinSize=min_size, MaxSize=max_size, DesiredCapacity=desired_capacity, HealthCheckGracePeriod=health_check_period, HealthCheckType=health_check_type, DefaultCooldown=default_cooldown, TerminationPolicies=termination_policies) # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not. launch_object = get_launch_object(connection, ec2_connection) if 'LaunchConfigurationName' in launch_object: ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] elif 'LaunchTemplate' in launch_object: ag['LaunchTemplate'] = launch_object['LaunchTemplate'] else: try: ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName'] except Exception: launch_template = as_group['LaunchTemplate'] # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg. 
ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']} if availability_zones: ag['AvailabilityZones'] = availability_zones if vpc_zone_identifier: ag['VPCZoneIdentifier'] = vpc_zone_identifier try: update_asg(connection, **ag) if metrics_collection: connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) else: connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e), exception=traceback.format_exc()) if notification_topic: try: put_notification_config(connection, group_name, notification_topic, notification_types) except botocore.exceptions.ClientError as e: module.fail_json(msg="Failed to update Autoscaling Group notifications.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to update Autoscaling Group notifications.", exception=traceback.format_exc()) if wait_for_instances: wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') # Wait for ELB health if ELB(s)defined if load_balancers: module.debug('\tWAITING FOR ELB HEALTH') wait_for_elb(connection, group_name) # Wait for target group health if target group(s)defined if target_group_arns: module.debug('\tWAITING FOR TG HEALTH') wait_for_target_group(connection, group_name) try: as_group = describe_autoscaling_groups(connection, group_name)[0] asg_properties = get_properties(as_group) if asg_properties != initial_asg_properties: changed = True except botocore.exceptions.ClientError as e: module.fail_json(msg="Failed to read existing Autoscaling Groups.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except 
botocore.exceptions.BotoCoreError as e: module.fail_json(msg="Failed to read existing Autoscaling Groups.", exception=traceback.format_exc()) return changed, asg_properties def delete_autoscaling_group(connection): group_name = module.params.get('name') notification_topic = module.params.get('notification_topic') wait_for_instances = module.params.get('wait_for_instances') wait_timeout = module.params.get('wait_timeout') if notification_topic: del_notification_config(connection, group_name, notification_topic) groups = describe_autoscaling_groups(connection, group_name) if groups: wait_timeout = time.time() + wait_timeout if not wait_for_instances: delete_asg(connection, group_name, force_delete=True) else: updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0) update_asg(connection, **updated_params) instances = True while instances and wait_for_instances and wait_timeout >= time.time(): tmp_groups = describe_autoscaling_groups(connection, group_name) if tmp_groups: tmp_group = tmp_groups[0] if not tmp_group.get('Instances'): instances = False time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) delete_asg(connection, group_name, force_delete=False) while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): time.sleep(5) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for ASG to delete. 
%s" % time.asctime()) return True return False def get_chunks(l, n): for i in range(0, len(l), n): yield l[i:i + n] def update_size(connection, group, max_size, min_size, dc): module.debug("setting ASG sizes") module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) updated_group = dict() updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName'] updated_group['MinSize'] = min_size updated_group['MaxSize'] = max_size updated_group['DesiredCapacity'] = dc update_asg(connection, **updated_group) def replace(connection): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') group_name = module.params.get('name') max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') launch_config_name = module.params.get('launch_config_name') # Required to maintain the default value being set to 'true' if launch_config_name: lc_check = module.params.get('lc_check') else: lc_check = False # Mirror above behaviour for Launch Templates launch_template = module.params.get('launch_template') if launch_template: lt_check = module.params.get('lt_check') else: lt_check = False replace_instances = module.params.get('replace_instances') replace_all_instances = module.params.get('replace_all_instances') as_group = describe_autoscaling_groups(connection, group_name)[0] if desired_capacity is None: desired_capacity = as_group['DesiredCapacity'] wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances') props = get_properties(as_group) instances = props['instances'] if replace_all_instances: # If replacing all instances, then set replace_instances to current set # This allows replace_instances and replace_all_instances to behave same replace_instances = instances if replace_instances: instances = replace_instances # check to see if instances are replaceable if checking launch 
configs if launch_config_name: new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances) elif launch_template: new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances) num_new_inst_needed = desired_capacity - len(new_instances) if lc_check or lt_check: if num_new_inst_needed == 0 and old_instances: module.debug("No new instances needed, but old instances are present. Removing old instances") terminate_batch(connection, old_instances, instances, True) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) changed = True return(changed, props) # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: module.debug("Overriding batch size to %s" % num_new_inst_needed) batch_size = num_new_inst_needed if not old_instances: changed = False return(changed, props) # check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: min_size = as_group['MinSize'] if max_size is None: max_size = as_group['MaxSize'] # set temporary settings and wait for them to be reached # This should get overwritten if the number of instances left is less than the batch size. 
as_group = describe_autoscaling_groups(connection, group_name)[0] update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances') wait_for_elb(connection, group_name) wait_for_target_group(connection, group_name) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) instances = props['instances'] if replace_instances: instances = replace_instances module.debug("beginning main loop") for i in get_chunks(instances, batch_size): # break out of this loop if we have enough new instances break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False) wait_for_term_inst(connection, term_instances) wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances') wait_for_elb(connection, group_name) wait_for_target_group(connection, group_name) as_group = describe_autoscaling_groups(connection, group_name)[0] if break_early: module.debug("breaking loop") break update_size(connection, as_group, max_size, min_size, desired_capacity) as_group = describe_autoscaling_groups(connection, group_name)[0] asg_properties = get_properties(as_group) module.debug("Rolling update complete.") changed = True return(changed, asg_properties) def get_instances_by_launch_config(props, lc_check, initial_instances): new_instances = [] old_instances = [] # old instances are those that have the old launch config if lc_check: for i in props['instances']: # Check if migrating from launch_template to launch_config first if 'launch_template' in props['instance_facts'][i]: old_instances.append(i) elif props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']: new_instances.append(i) else: old_instances.append(i) else: module.debug("Comparing initial instances with current: %s" % initial_instances) for i in props['instances']: if i 
not in initial_instances: new_instances.append(i) else: old_instances.append(i) module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) return new_instances, old_instances def get_instances_by_launch_template(props, lt_check, initial_instances): new_instances = [] old_instances = [] # old instances are those that have the old launch template or version of the same launch templatec if lt_check: for i in props['instances']: # Check if migrating from launch_config_name to launch_template_name first if 'launch_config_name' in props['instance_facts'][i]: old_instances.append(i) elif props['instance_facts'][i]['launch_template'] == props['launch_template']: new_instances.append(i) else: old_instances.append(i) else: module.debug("Comparing initial instances with current: %s" % initial_instances) for i in props['instances']: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) return new_instances, old_instances def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances): instances_to_terminate = [] instances = (inst_id for inst_id in replace_instances if inst_id in props['instances']) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config if module.params.get('launch_config_name'): if lc_check: for i in instances: if 'launch_template' in props['instance_facts'][i]: instances_to_terminate.append(i) elif props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: instances_to_terminate.append(i) else: for i in instances: if i in initial_instances: instances_to_terminate.append(i) elif module.params.get('launch_template'): if lt_check: for i in instances: if 'launch_config_name' in 
props['instance_facts'][i]: instances_to_terminate.append(i) elif props['instance_facts'][i]['launch_template'] != props['launch_template']: instances_to_terminate.append(i) else: for i in instances: if i in initial_instances: instances_to_terminate.append(i) return instances_to_terminate def terminate_batch(connection, replace_instances, initial_instances, leftovers=False): batch_size = module.params.get('replace_batch_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') group_name = module.params.get('name') lc_check = module.params.get('lc_check') lt_check = module.params.get('lt_check') decrement_capacity = False break_loop = False as_group = describe_autoscaling_groups(connection, group_name)[0] if desired_capacity is None: desired_capacity = as_group['DesiredCapacity'] props = get_properties(as_group) desired_size = as_group['MinSize'] if module.params.get('launch_config_name'): new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances) else: new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances) num_new_inst_needed = desired_capacity - len(new_instances) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances) module.debug("new instances needed: %s" % num_new_inst_needed) module.debug("new instances: %s" % new_instances) module.debug("old instances: %s" % old_instances) module.debug("batch instances: %s" % ",".join(instances_to_terminate)) if num_new_inst_needed == 0: decrement_capacity = True if as_group['MinSize'] != min_size: if min_size is None: min_size = as_group['MinSize'] updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size) update_asg(connection, **updated_params) module.debug("Updating minimum size back to 
original of %s" % min_size) # if are some leftover old instances, but we are already at capacity with new ones # we don't want to decrement capacity if leftovers: decrement_capacity = False break_loop = True instances_to_terminate = old_instances desired_size = min_size module.debug("No new instances needed") if num_new_inst_needed < batch_size and num_new_inst_needed != 0: instances_to_terminate = instances_to_terminate[:num_new_inst_needed] decrement_capacity = False break_loop = False module.debug("%s new instances needed" % num_new_inst_needed) module.debug("decrementing capacity: %s" % decrement_capacity) for instance_id in instances_to_terminate: elb_dreg(connection, group_name, instance_id) module.debug("terminating instance: %s" % instance_id) terminate_asg_instance(connection, instance_id, decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are # no longer in the list return break_loop, desired_size, instances_to_terminate def wait_for_term_inst(connection, term_instances): wait_timeout = module.params.get('wait_timeout') group_name = module.params.get('name') as_group = describe_autoscaling_groups(connection, group_name)[0] count = 1 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: module.debug("waiting for instances to terminate") count = 0 as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) instance_facts = props['instance_facts'] instances = (i for i in instance_facts if i in term_instances) for i in instances: lifecycle = instance_facts[i]['lifecycle_state'] health = instance_facts[i]['health_status'] module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health)) if lifecycle.startswith('Terminating') or health == 'Unhealthy': count += 1 time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for old instances to terminate. 
%s" % time.asctime()) def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) time.sleep(10) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) module.debug("Reached %s: %s" % (prop, desired_size)) return props def asg_exists(connection): group_name = module.params.get('name') as_group = describe_autoscaling_groups(connection, group_name) return bool(len(as_group)) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(required=True, type='str'), load_balancers=dict(type='list'), target_group_arns=dict(type='list'), availability_zones=dict(type='list'), launch_config_name=dict(type='str'), launch_template=dict(type='dict', default=None, options=dict( version=dict(type='str'), launch_template_name=dict(type='str'), launch_template_id=dict(type='str'), ), ), min_size=dict(type='int'), max_size=dict(type='int'), placement_group=dict(type='str'), desired_capacity=dict(type='int'), vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[]), lc_check=dict(type='bool', default=True), lt_check=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300), state=dict(default='present', 
choices=['present', 'absent']), tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True), termination_policies=dict(type='list', default='Default'), notification_topic=dict(type='str', default=None), notification_types=dict(type='list', default=[ 'autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' ]), suspend_processes=dict(type='list', default=[]), metrics_collection=dict(type='bool', default=False), metrics_granularity=dict(type='str', default='1Minute'), metrics_list=dict(type='list', default=[ 'GroupMinSize', 'GroupMaxSize', 'GroupDesiredCapacity', 'GroupInServiceInstances', 'GroupPendingInstances', 'GroupStandbyInstances', 'GroupTerminatingInstances', 'GroupTotalInstances' ]) ), ) global module module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ ['replace_all_instances', 'replace_instances'], ['launch_config_name', 'launch_template']] ) if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') state = module.params.get('state') replace_instances = module.params.get('replace_instances') replace_all_instances = module.params.get('replace_all_instances') region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params) changed = create_changed = replace_changed = False exists = asg_exists(connection) if state == 'present': create_changed, asg_properties = create_autoscaling_group(connection) elif state == 'absent': changed = delete_autoscaling_group(connection) module.exit_json(changed=changed) # Only replace instances if asg existed at start of call if exists and (replace_all_instances or 
replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')): replace_changed, asg_properties = replace(connection) if create_changed or replace_changed: changed = True module.exit_json(changed=changed, **asg_properties) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,085
ec2_asg tagging results in "modified" status if tags are not alphabetized
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY When the ec2_asg module attempts to evaluate whether tags for an ASG have changed, it compares the input list of tags (in the order given) to the API result (in the order returned by the API, A-Za-z). If the ordering is not the same (non-alphabetical input), the module believes that modifications have been made, and issues API calls to replace the tags. This is due to the [use](https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/cloud/amazon/ec2_asg.py#L1047) of `zip()` without respect to key ordering. This can be resolved by sorting the input dictionaries by key prior to `zip()`, or by switching to something [like](https://github.com/ansible/ansible/blob/73e171fd946b74089a99051858d8d49e561dea41/lib/ansible/module_utils/ec2.py#L706) `compare_aws_tags()` from `ansible.module_utils.ec2`. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ec2_asg ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.8.4 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. 
--> N/A ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production Propagate_at_launch: no ``` This will always show modification, as the tags are listed in an order different than the API response. <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Tags are evaluated by matching keys, without regard to alphabetical input ordering ##### ACTUAL RESULTS Ansible shows modifications have occurred and updates the ASG's tags, even though no true modifications have been made.
https://github.com/ansible/ansible/issues/61085
https://github.com/ansible/ansible/pull/61284
d7604844c2a489bb13216dd6340345ac2bb1df7f
b8650c0a50eb76aa1146ea7119d3451e2253037f
2019-08-22T02:48:10Z
python
2019-09-06T19:48:40Z
test/integration/targets/ec2_asg/tasks/main.yml
--- # tasks file for test_ec2_asg - name: Test incomplete credentials with ec2_asg block: # ============================================================ - name: test invalid profile ec2_asg: name: "{{ resource_prefix }}-asg" region: "{{ aws_region }}" profile: notavalidprofile ignore_errors: yes register: result - name: assert: that: - "'The config profile (notavalidprofile) could not be found' in result.msg" - name: test partial credentials ec2_asg: name: "{{ resource_prefix }}-asg" region: "{{ aws_region }}" aws_access_key: "{{ aws_access_key }}" ignore_errors: yes register: result - name: assert: that: - "'Partial credentials found in explicit, missing: aws_secret_access_key' in result.msg" - name: test without specifying region ec2_asg: name: "{{ resource_prefix }}-asg" aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" security_token: "{{ security_token | default(omit) }}" ignore_errors: yes register: result - name: assert: that: - result.msg == 'The ec2_asg module requires a region and none was found in configuration, environment variables or module parameters' # ============================================================ - name: Test incomplete arguments with ec2_asg block: # ============================================================ - name: test without specifying required module options ec2_asg: aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" security_token: "{{ security_token | default(omit) }}" ignore_errors: yes register: result - name: assert name is a required module option assert: that: - "result.msg == 'missing required arguments: name'" - name: Run ec2_asg integration tests. 
module_defaults: group/aws: aws_access_key: "{{ aws_access_key }}" aws_secret_key: "{{ aws_secret_key }}" security_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: # ============================================================ - name: Find AMI to use ec2_ami_info: owners: 'amazon' filters: name: '{{ ec2_ami_name }}' register: ec2_amis - set_fact: ec2_ami_image: '{{ ec2_amis.images[0].image_id }}' - name: load balancer name has to be less than 32 characters # the 8 digit identifier at the end of resource_prefix helps determine during which test something # was created set_fact: load_balancer_name: "{{ item }}-lb" with_items: "{{ resource_prefix | regex_findall('.{8}$') }}" # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations - name: Create VPC for use in testing ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 tenancy: default register: testing_vpc - name: Create internet gateway for use in testing ec2_vpc_igw: vpc_id: "{{ testing_vpc.vpc.id }}" state: present register: igw - name: Create subnet for use in testing ec2_vpc_subnet: state: present vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 az: "{{ aws_region }}a" resource_tags: Name: "{{ resource_prefix }}-subnet" register: testing_subnet - name: create routing rules ec2_vpc_route_table: vpc_id: "{{ testing_vpc.vpc.id }}" tags: created: "{{ resource_prefix }}-route" routes: - dest: 0.0.0.0/0 gateway_id: "{{ igw.gateway_id }}" subnets: - "{{ testing_subnet.subnet.id }}" - name: create a security group with the vpc created in the ec2_setup ec2_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" rules: - proto: tcp from_port: 22 to_port: 22 cidr_ip: 0.0.0.0/0 - proto: tcp from_port: 80 to_port: 80 cidr_ip: 0.0.0.0/0 register: sg - name: ensure launch configs exist ec2_lc: name: "{{ item }}" assign_public_ip: true image_id: "{{ ec2_ami_image }}" 
user_data: | #cloud-config package_upgrade: true package_update: true packages: - httpd runcmd: - "service httpd start" security_groups: "{{ sg.group_id }}" instance_type: t3.micro with_items: - "{{ resource_prefix }}-lc" - "{{ resource_prefix }}-lc-2" # ============================================================ - name: launch asg and wait for instances to be deemed healthy (no ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 1 min_size: 1 max_size: 1 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" state: present wait_for_instances: yes register: output - assert: that: - "output.viable_instances == 1" # - name: pause for a bit to make sure that the group can't be trivially deleted # pause: seconds=30 - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # ============================================================ - name: launch asg and do not wait for instances to be deemed healthy (no ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 1 min_size: 1 max_size: 1 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: no state: present register: output - assert: that: - "output.viable_instances == 0" - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # ============================================================ - name: create asg with asg metrics enabled ec2_asg: name: "{{ resource_prefix }}-asg" metrics_collection: true launch_config_name: "{{ resource_prefix }}-lc" desired_capacity: 0 min_size: 0 max_size: 0 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" state: present register: output - assert: that: - "'Group' in output.metrics_collection.0.Metric" - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # 
============================================================ - name: launch load balancer ec2_elb_lb: name: "{{ load_balancer_name }}" state: present security_group_ids: - "{{ sg.group_id }}" subnets: "{{ testing_subnet.subnet.id }}" connection_draining_timeout: 60 listeners: - protocol: http load_balancer_port: 80 instance_port: 80 health_check: ping_protocol: tcp ping_port: 80 ping_path: "/" response_timeout: 5 interval: 10 unhealthy_threshold: 4 healthy_threshold: 2 register: load_balancer - name: launch asg and wait for instances to be deemed healthy (ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: ELB desired_capacity: 1 min_size: 1 max_size: 1 health_check_period: 300 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" load_balancers: "{{ load_balancer_name }}" wait_for_instances: yes wait_timeout: 900 state: present register: output - assert: that: - "output.viable_instances == 1" # ============================================================ # grow scaling group to 3 - name: add 2 more instances wait for instances to be deemed healthy (ELB) ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: ELB desired_capacity: 3 min_size: 3 max_size: 5 health_check_period: 600 vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" load_balancers: "{{ load_balancer_name }}" wait_for_instances: yes wait_timeout: 1200 state: present register: output - assert: that: - "output.viable_instances == 3" # ============================================================ # # perform rolling replace with different launch configuration - name: perform rolling update to new AMI ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc-2" health_check_type: ELB desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: "{{ load_balancer_name }}" vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" 
wait_for_instances: yes replace_all_instances: yes wait_timeout: 1800 state: present register: output # ensure that all instances have new launch config - assert: that: - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'" with_dict: "{{ output.instance_facts }}" # assert they are all healthy and that the rolling update resulted in the appropriate number of instances - assert: that: - "output.viable_instances == 3" # ============================================================ # perform rolling replace with the original launch configuration - name: perform rolling update to new AMI while removing the load balancer ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: EC2 desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: [] vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: yes replace_all_instances: yes wait_timeout: 1800 state: present register: output # ensure that all instances have new launch config - assert: that: - "item.value.launch_config_name == '{{ resource_prefix }}-lc'" with_dict: "{{ output.instance_facts }}" # assert they are all healthy and that the rolling update resulted in the appropriate number of instances # there should be the same number of instances as there were before the rolling update was performed - assert: that: - "output.viable_instances == 3" # ============================================================ # perform rolling replace with new launch configuration and lc_check:false # Note - this is done async so we can query asg_facts during # the execution. Issues #28087 and #35993 result in correct # end result, but spin up extraneous instances during execution. 
- name: "perform rolling update to new AMI with lc_check: false" ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc-2" health_check_type: EC2 desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: [] vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: yes replace_all_instances: yes replace_batch_size: 3 lc_check: false wait_timeout: 1800 state: present async: 1800 poll: 0 register: asg_job - name: get ec2_asg facts for 3 minutes ec2_asg_info: name: "{{ resource_prefix }}-asg" register: output loop_control: pause: 15 with_sequence: count=12 - set_fact: inst_id_json_query: 'results[*].results[*].instances[*].instance_id' # Since we started with 3 servers and replace all of them. # We should see 6 servers total. - assert: that: - "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 6" - name: Ensure ec2_asg task completes async_status: jid="{{ asg_job.ansible_job_id }}" register: status until: status is finished retries: 200 delay: 15 # ============================================================ - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent wait_timeout: 800 async: 400 # Create new asg with replace_all_instances and lc_check:false # Note - this is done async so we can query asg_facts during # the execution. Issues #28087 results in correct # end result, but spin up extraneous instances during execution. 
- name: "new asg with lc_check: false" ec2_asg: name: "{{ resource_prefix }}-asg" launch_config_name: "{{ resource_prefix }}-lc" health_check_type: EC2 desired_capacity: 3 min_size: 1 max_size: 5 health_check_period: 900 load_balancers: [] vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" wait_for_instances: yes replace_all_instances: yes replace_batch_size: 3 lc_check: false wait_timeout: 1800 state: present async: 1800 poll: 0 register: asg_job # Collect ec2_asg_info for 3 minutes - name: get ec2_asg information ec2_asg_info: name: "{{ resource_prefix }}-asg" register: output loop_control: pause: 15 with_sequence: count=12 - set_fact: inst_id_json_query: 'results[*].results[*].instances[*].instance_id' # Get all instance_ids we saw and assert we saw number expected # Should only see 3 (don't replace instances we just created) - assert: that: - "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 3" - name: Ensure ec2_asg task completes async_status: jid="{{ asg_job.ansible_job_id }}" register: status until: status is finished retries: 200 delay: 15 # ============================================================ always: - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10 # Remove the testing dependencies - name: remove the load balancer ec2_elb_lb: name: "{{ load_balancer_name }}" state: absent security_group_ids: - "{{ sg.group_id }}" subnets: "{{ testing_subnet.subnet.id }}" wait: yes connection_draining_timeout: 60 listeners: - protocol: http load_balancer_port: 80 instance_port: 80 health_check: ping_protocol: tcp ping_port: 80 ping_path: "/" response_timeout: 5 interval: 10 unhealthy_threshold: 4 healthy_threshold: 2 register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove launch configs ec2_lc: name: "{{ resource_prefix }}-lc" state: absent register: removed until: removed is not failed 
ignore_errors: yes retries: 10 with_items: - "{{ resource_prefix }}-lc" - "{{ resource_prefix }}-lc-2" - name: remove the security group ec2_group: name: "{{ resource_prefix }}-sg" description: a security group for ansible tests vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove routing rules ec2_vpc_route_table: state: absent vpc_id: "{{ testing_vpc.vpc.id }}" tags: created: "{{ resource_prefix }}-route" routes: - dest: 0.0.0.0/0 gateway_id: "{{ igw.gateway_id }}" subnets: - "{{ testing_subnet.subnet.id }}" register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove internet gateway ec2_vpc_igw: vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove the subnet ec2_vpc_subnet: state: absent vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 register: removed until: removed is not failed ignore_errors: yes retries: 10 - name: remove the VPC ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 state: absent register: removed until: removed is not failed ignore_errors: yes retries: 10
closed
ansible/ansible
https://github.com/ansible/ansible
61,738
ecs_certificate chain not in standard format
##### SUMMARY The file contents that end up as the output of ecs_certificates "full_chain_path" are not a valid format for import into, for example, a PKCS12 store. This is the result of the return value of the ECS API being an array, and not properly turning that array into concatenated certificates for the resulting PEM file. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ecs_certificate ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Applies to ansible 2.9.0.dev0 ##### CONFIGURATION N/A affects all configurations ##### OS / ENVIRONMENT N/A affects all OS/environment ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Populate full_chain_path and inspect output ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> A properly formatted PEM certificate chain ##### ACTUAL RESULTS A certificate chain in format ['CERT','CERT']
https://github.com/ansible/ansible/issues/61738
https://github.com/ansible/ansible/pull/61858
cac93cbd1f041eac5045250a6663644cdbba3df8
943888b9553bca40b18c3922f508645d09f53392
2019-09-03T20:47:14Z
python
2019-09-07T05:58:25Z
changelogs/fragments/61738-ecs-certificate-invalid-chain.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,738
ecs_certificate chain not in standard format
##### SUMMARY The file contents that end up as the output of ecs_certificates "full_chain_path" are not a valid format for import into, for example, a PKCS12 store. This is the result of the return value of the ECS API being an array, and not properly turning that array into concatenated certificates for the resulting PEM file. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ecs_certificate ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Applies to ansible 2.9.0.dev0 ##### CONFIGURATION N/A affects all configurations ##### OS / ENVIRONMENT N/A affects all OS/environment ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Populate full_chain_path and inspect output ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> A properly formatted PEM certificate chain ##### ACTUAL RESULTS A certificate chain in format ['CERT','CERT']
https://github.com/ansible/ansible/issues/61738
https://github.com/ansible/ansible/pull/61858
cac93cbd1f041eac5045250a6663644cdbba3df8
943888b9553bca40b18c3922f508645d09f53392
2019-09-03T20:47:14Z
python
2019-09-07T05:58:25Z
lib/ansible/modules/crypto/entrust/ecs_certificate.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c), Entrust Datacard Corporation, 2019 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ecs_certificate author: - Chris Trufan (@ctrufan) version_added: '2.9' short_description: Request SSL/TLS certificates with the Entrust Certificate Services (ECS) API description: - Create, reissue, and renew certificates with the Entrust Certificate Services (ECS) API. - Requires credentials for the L(Entrust Certificate Services,https://www.entrustdatacard.com/products/categories/ssl-certificates) (ECS) API. - In order to request a certificate, the domain and organization used in the certificate signing request must be already validated in the ECS system. It is I(not) the responsibility of this module to perform those steps. notes: - C(path) must be specified as the output location of the certificate. requirements: - cryptography >= 1.6 options: backup: description: - Path to store a backup of the initial certificate, if I(path) pointed to an existing file certificate. type: bool default: false force: description: - If force is used, a certificate is requested regardless of whether I(path) points to an existing valid certificate. - If C(request_type=renew), a forced renew will fail if the certificate being renewed has been issued within the past 30 days, regardless of the value of I(remaining_days) or the return value of I(cert_days) - the ECS API does not support the "renew" operation for certificates that are not at least 30 days old. type: bool default: false path: description: - Path to put the certificate file as a PEM encoded cert. 
- If the certificate at this location is not an Entrust issued certificate, a new certificate will always be requested regardless of validity. - If there is already an Entrust certificate at this location, whether it is replaced is dependent upon the I(remaining_days) calculation. - If an existing certificate is being replaced (see I(remaining_days), I(force), I(tracking_id)), the operation taken to replace it is dependent on I(request_type) type: path required: true full_chain_path: description: - Path to put the full certificate chain of the certificate, intermediates, and roots. type: path csr: description: - Base-64 encoded Certificate Signing Request (CSR). I(csr) is accepted with or without PEM formatting around the Base-64 string. - If no I(csr) is provided when C(request_type=reissue) or C(request_type=renew), the certificate will be generated with the same public key as the certificate being renewed or reissued. - If I(subject_alt_name) is specified, it will override the subject alternate names in the CSR. - If I(eku) is specified, it will override the extended key usage in the CSR. - If I(ou) is specified, it will override the organizational units "ou=" present in the subject distinguished name of the CSR, if any. - The organization "O=" field from the CSR will not be used. It will be replaced in the issued certificate by I(org) if present, and if not present, the organization tied to I(client_id). type: str tracking_id: description: - Tracking ID of certificate to reissue or renew. - I(tracking_id) is invalid if C(request_type=new) or C(request_type=validate_only). - If there is a certificate present in I(path) and it is an ECS certificate, I(tracking_id) will be ignored. - If there is not a certificate present in I(path) or there is but it is from another provider, the certificate represented by I(tracking_id) will be renewed or reissued and saved to I(path). 
- If there is not a certificate present in I(path) and the I(force) and I(remaining_days) parameters do not indicate a new certificate is needed, the certificate referenced by I(tracking_id) certificate will be saved to I(path). - This can be used when a known certificate is not currently present on a server, but you want to renew or reissue it to be managed by an ansible playbook. For example, if you specify C(request_type=renew), I(tracking_id) of an issued certificate, and I(path) to a file that does not exist, the first run of a task will download the certificate specified by I(tracking_id) (assuming it is still valid), and future runs of the task will (if applicable - see I(force) and I(remaining_days)) renew the certificate now present in I(path). type: int remaining_days: description: - The number of days the certificate must have left being valid. If C(cert_days < remaining_days) then a new certificate will be obtained using I(request_type). - If C(request_type=renew), a renew will fail if the certificate being renewed has been issued within the past 30 days, so do not set a I(remaining_days) value that is within 30 days of the full lifetime of the certificate being acted upon. (e.g. if you are requesting Certificates with a 90 day lifetime, do not set remaining_days to a value C(60) or higher). - The I(force) option may be used to ensure that a new certificate is always obtained. type: int default: 30 request_type: description: - Operation performed if I(tracking_id) references a valid certificate to reissue, or there is already a certificate present in I(path) but either I(force) is specified or C(cert_days < remaining_days). - Specifying C(request_type=validate_only) means the request will be validated against the ECS API, but no certificate will be issued. - Specifying C(request_type=new) means a certificate request will always be submitted and a new certificate issued. 
- Specifying C(request_type=renew) means that an existing certificate (specified by I(tracking_id) if present, otherwise I(path)) will be renewed. If there is no certificate to renew, a new certificate is requested. - Specifying C(request_type=reissue) means that an existing certificate (specified by I(tracking_id) if present, otherwise I(path)) will be reissued. If there is no certificate to reissue, a new certificate is requested. - If a certificate was issued within the past 30 days, the 'renew' operation is not a valid operation and will fail. - Note that C(reissue) is an operation that will result in the revocation of the certificate that is reissued, be cautious with it's use. - I(check_mode) is only supported if C(request_type=new) - For example, setting C(request_type=renew) and C(remaining_days=30) and pointing to the same certificate on multiple playbook runs means that on the first run new certificate will be requested. It will then be left along on future runs until it is within 30 days of expiry, then the ECS "renew" operation will be performed. type: str choices: [ 'new', 'renew', 'reissue', 'validate_only'] default: new cert_type: description: - The type of certificate product to request. - If a certificate is being reissued or renewed, this parameter is ignored, and the C(cert_type) of the initial certificate is used. type: str choices: [ 'STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CODE_SIGNING', 'EV_CODE_SIGNING', 'CDS_INDIVIDUAL', 'CDS_GROUP', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT' ] subject_alt_name: description: - The subject alternative name identifiers, as an array of values (applies to I(cert_type) with a value of C(STANDARD_SSL), C(ADVANTAGE_SSL), C(UC_SSL), C(EV_SSL), C(WILDCARD_SSL), C(PRIVATE_SSL), and C(PD_SSL)). - If you are requesting a new SSL certificate, and you pass a I(subject_alt_name) parameter, any SAN names in the CSR are ignored. 
If no subjectAltName parameter is passed, the SAN names in the CSR are used. - See I(request_type) to understand more about SANs during reissues and renewals. - In the case of certificates of type C(STANDARD_SSL) certificates, if the CN of the certificate is <domain>.<tld> only the www.<domain>.<tld> value is accepted. If the CN of the certificate is www.<domain>.<tld> only the <domain>.<tld> value is accepted. type: list eku: description: - If specified, overrides the key usage in the I(csr). type: str choices: [ SERVER_AUTH, CLIENT_AUTH, SERVER_AND_CLIENT_AUTH ] ct_log: description: - In compliance with browser requirements, this certificate may be posted to the Certificate Transparency (CT) logs. This is a best practice technique that helps domain owners monitor certificates issued to their domains. Note that not all certificates are eligible for CT logging. - If I(ct_log) is not specified, the certificate uses the account default. - If I(ct_log) is specified and the account settings allow it, I(ct_log) overrides the account default. - If I(ct_log) is set to C(false), but the account settings are set to "always log", the certificate generation will fail. type: bool client_id: description: - The client ID to submit the Certificate Signing Request under. - If no client ID is specified, the certificate will be submitted under the primary client with ID of 1. - When using a client other than the primary client, the I(org) parameter cannot be specified. - The issued certificate will have an organization value in the subject distinguished name represented by the client. type: int default: 1 org: description: - Organization "O=" to include in the certificate. - If I(org) is not specified, the organization from the client represented by I(client_id) is used. - Unless the I(cert_type) is C(PD_SSL), this field may not be specified if the value of I(client_id) is not the primary client of "1". 
For all non-primary clients, certificates may only be issued with the organization of that client. type: str ou: description: - Organizational unit "OU=" to include in the certificate. - I(ou) behavior is dependent on whether organizational units are enabled for your account. If organizational unit support is disabled for your account, organizational units from the I(csr) and the I(ou) parameter are ignored. - If both I(csr) and I(ou) are specified, the value in I(ou) will override the OU fields present in the subject distinguished name in the I(csr) - If neither I(csr) nor I(ou) are specified for a renew or reissue operation, the OU fields in the initial certificate are reused. - An invalid OU from I(csr) is ignored, but any invalid organizational units in I(ou) will result in an error indicating "Unapproved OU". The I(ou) parameter can be used to force failure if an unapproved organizational unit is provided. - A maximum of one OU may be specified for current products. Multiple OUs are reserved for future products. type: list end_user_key_storage_agreement: description: - The end user of the Code Signing certificate must generate and store the private key for this request on cryptographically secure hardware to be compliant with the Entrust CSP and Subscription agreement. If requesting a certificate of type C(CODE_SIGNING) or C(EV_CODE_SIGNING), you must set I(end_user_key_storage_agreement) to true if and only if you acknowledge that you will inform the user of this requirement. - Applicable only to I(cert_type) of values C(CODE_SIGNING) and C(EV_CODE_SIGNING). type: bool tracking_info: description: Free form tracking information to attach to the record for the certificate. type: str requester_name: description: Requester name to associate with certificate tracking information. type: str required: true requester_email: description: Requester email to associate with certificate tracking information and receive delivery and expiry notices for the certificate. 
type: str required: true requester_phone: description: Requester phone number to associate with certificate tracking information. type: str required: true additional_emails: description: A list of additional email addresses to receive the delivery notice and expiry notification for the certificate. type: list custom_fields: description: - Mapping of custom fields to associate with the certificate request and certificate. - Only supported if custom fields are enabled for your account. - Each custom field specified must be a custom field you have defined for your account. type: dict suboptions: text1: description: Custom text field of maximum size 500. type: str text2: description: Custom text field of maximum size 500. type: str text3: description: Custom text field of maximum size 500. type: str text4: description: Custom text field of maximum size 500. type: str text5: description: Custom text field of maximum size 500. type: str text6: description: Custom text field of maximum size 500. type: str text7: description: Custom text field of maximum size 500. type: str text8: description: Custom text field of maximum size 500. type: str text9: description: Custom text field of maximum size 500. type: str text10: description: Custom text field of maximum size 500. type: str text11: description: Custom text field of maximum size 500. type: str text12: description: Custom text field of maximum size 500. type: str text13: description: Custom text field of maximum size 500. type: str text14: description: Custom text field of maximum size 500. type: str text15: description: Custom text field of maximum size 500. type: str number1: description: Custom number field. type: float number2: description: Custom number field. type: float number3: description: Custom number field. type: float number4: description: Custom number field. type: float number5: description: Custom number field. type: float date1: description: Custom date field. 
type: str date2: description: Custom date field. type: str date3: description: Custom date field. type: str date4: description: Custom date field. type: str date5: description: Custom date field. type: str email1: description: Custom email field. type: str email2: description: Custom email field. type: str email3: description: Custom email field. type: str email4: description: Custom email field. type: str email5: description: Custom email field. type: str dropdown1: description: Custom dropdown field. type: str dropdown2: description: Custom dropdown field. type: str dropdown3: description: Custom dropdown field. type: str dropdown4: description: Custom dropdown field. type: str dropdown5: description: Custom dropdown field. type: str cert_expiry: description: - The date the certificate should be set to expire, as an RFC3339 compliant date or date-time. For example, C(2020-02-23), C(2020-02-23T15:00:00.05Z). - I(cert_expiry) is only supported for requests of C(request_type=new) or C(request_type=renew). If C(request_type=reissue), I(cert_expiry) will be used for the first certificate issuance, but subsequent issuances will have the same expiry as the initial certificate. - A reissued certificate will always have the same expiry as the original certificate. - Note that only the date (day, month, year) is supported for specifying expiry date. If you choose to specify an expiry time with the expiry date, the time will be adjusted to Eastern Standard Time (EST). This could have the unintended effect of moving your expiry date to the previous day. - Applies only to accounts with a pooling inventory model. - Only one of I(cert_expiry) or I(cert_lifetime) may be specified. type: str cert_lifetime: description: - The lifetime of the certificate. - Applies to all certificates for accounts with a non-pooling inventory model. - I(cert_lifetime) is only supported for requests of C(request_type=new) or C(request_type=renew). 
If C(request_type=reissue), I(cert_lifetime) will be used for the first certificate issuance, but subsequent issuances will have the same expiry as the initial certificate. - Applies to certificates of I(cert_type)=C(CDS_INDIVIDUAL, CDS_GROUP, CDS_ENT_LITE, CDS_ENT_PRO, SMIME_ENT) for accounts with a pooling inventory model. - C(P1Y) is a certificate with a 1 year lifetime. - C(P2Y) is a certificate with a 2 year lifetime. - C(P3Y) is a certificate with a 3 year lifetime. - Only one of I(cert_expiry) or I(cert_lifetime) may be specified. type: str choices: [ P1Y, P2Y, P3Y ] seealso: - module: openssl_privatekey description: Can be used to create private keys (both for certificates and accounts). - module: openssl_csr description: Can be used to create a Certificate Signing Request (CSR). extends_documentation_fragment: - ecs_credential ''' EXAMPLES = r''' - name: Request a new certificate from Entrust with bare minimum parameters. Will request a new certificate if current one is valid but within 30 days of expiry. If replacing an existing file in path, will back it up. ecs_certificate: backup: true path: /etc/ssl/crt/ansible.com.crt full_chain_path: /etc/ssl/crt/ansible.com.chain.crt csr: /etc/ssl/csr/ansible.com.csr cert_type: EV_SSL requester_name: Jo Doe requester_email: [email protected] requester_phone: 555-555-5555 entrust_api_user: apiusername entrust_api_key: a^lv*32!cd9LnT entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-client.key - name: If there is no certificate present in path, request a new certificate of type EV_SSL. Otherwise, if there is an Entrust managed certificate in path and it is within 63 days of expiration, request a renew of that certificate. 
ecs_certificate: path: /etc/ssl/crt/ansible.com.crt csr: /etc/ssl/csr/ansible.com.csr cert_type: EV_SSL cert_expiry: '2020-08-20' request_type: renew remaining_days: 63 requester_name: Jo Doe requester_email: [email protected] requester_phone: 555-555-5555 entrust_api_user: apiusername entrust_api_key: a^lv*32!cd9LnT entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-client.key - name: If there is no certificate present in path, download certificate specified by tracking_id if it is still valid. Otherwise, if the certificate is within 79 days of expiration, request a renew of that certificate and save it in path. This can be used to "migrate" a certificate to be Ansible managed. ecs_certificate: path: /etc/ssl/crt/ansible.com.crt csr: /etc/ssl/csr/ansible.com.csr tracking_id: 2378915 request_type: renew remaining_days: 79 entrust_api_user: apiusername entrust_api_key: a^lv*32!cd9LnT entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-client.key - name: Force a reissue of the certificate specified by tracking_id. ecs_certificate: path: /etc/ssl/crt/ansible.com.crt force: true tracking_id: 2378915 request_type: reissue entrust_api_user: apiusername entrust_api_key: a^lv*32!cd9LnT entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-client.key - name: Request a new certificate with an alternative client. Note that the issued certificate will have it's Subject Distinguished Name use the organization details associated with that client, rather than what is in the CSR. 
ecs_certificate: path: /etc/ssl/crt/ansible.com.crt csr: /etc/ssl/csr/ansible.com.csr client_id: 2 requester_name: Jo Doe requester_email: [email protected] requester_phone: 555-555-5555 entrust_api_user: apiusername entrust_api_key: a^lv*32!cd9LnT entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-client.key - name: Request a new certificate with a number of CSR parameters overridden and tracking information ecs_certificate: path: /etc/ssl/crt/ansible.com.crt full_chain_path: /etc/ssl/crt/ansible.com.chain.crt csr: /etc/ssl/csr/ansible.com.csr subject_alt_name: - ansible.testcertificates.com - www.testcertificates.com eku: SERVER_AND_CLIENT_AUTH ct_log: true org: Test Organization Inc. ou: - Administration tracking_info: "Submitted via Ansible" additional_emails: - [email protected] - [email protected] custom_fields: text1: Admin text2: Invoice 25 number1: 342 date1: '2018-01-01' email1: [email protected] dropdown1: red cert_expiry: '2020-08-15' requester_name: Jo Doe requester_email: [email protected] requester_phone: 555-555-5555 entrust_api_user: apiusername entrust_api_key: a^lv*32!cd9LnT entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-client.key ''' RETURN = ''' filename: description: Path to the generated Certificate. returned: changed or success type: str sample: /etc/ssl/crt/www.ansible.com.crt backup_file: description: Name of backup file created for the certificate. returned: changed and if I(backup) is C(true) type: str sample: /path/to/www.ansible.com.crt.2019-03-09@11:22~ backup_full_chain_file: description: Name of the backup file created for the certificate chain. returned: changed and if I(backup) is C(true) and I(full_chain_path) is set. type: str sample: /path/to/ca.chain.crt.2019-03-09@11:22~ tracking_id: description: The tracking ID to reference and track the certificate in ECS. 
returned: success type: int sample: 380079 serial_number: description: The serial number of the issued certificate. returned: success type: int sample: 1235262234164342 cert_days: description: The number of days the certificate remains valid. returned: success type: int sample: 253 cert_status: description: - The certificate status in ECS. - 'Current possible values (which may be expanded in the future) are: C(ACTIVE), C(APPROVED), C(DEACTIVATED), C(DECLINED), C(EXPIRED), C(NA), C(PENDING), C(PENDING_QUORUM), C(READY), C(REISSUED), C(REISSUING), C(RENEWED), C(RENEWING), C(REVOKED), C(SUSPENDED)' returned: success type: str sample: ACTIVE cert_details: description: - The full response JSON from the Get Certificate call of the ECS API. - 'While the response contents are guaranteed to be forwards compatible with new ECS API releases, Entrust recommends that you do not make any playbooks take actions based on the content of this field. However it may be useful for debugging, logging, or auditing purposes.' 
returned: success type: dict ''' from ansible.module_utils.ecs.api import ( ecs_client_argument_spec, ECSClient, RestOperationException, SessionConfigurationException, ) import datetime import json import os import re import time import traceback from distutils.version import LooseVersion from ansible.module_utils import crypto as crypto_utils from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils._text import to_native, to_bytes CRYPTOGRAPHY_IMP_ERR = None try: import cryptography CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__) except ImportError: CRYPTOGRAPHY_IMP_ERR = traceback.format_exc() CRYPTOGRAPHY_FOUND = False else: CRYPTOGRAPHY_FOUND = True MINIMAL_CRYPTOGRAPHY_VERSION = '1.6' def validate_cert_expiry(cert_expiry): search_string_partial = re.compile(r'^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])\Z') search_string_full = re.compile(r'^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):' r'([0-5][0-9]|60)(.[0-9]+)?(([Zz])|([+|-]([01][0-9]|2[0-3]):[0-5][0-9]))\Z') if search_string_partial.match(cert_expiry) or search_string_full.match(cert_expiry): return True return False def calculate_cert_days(expires_after): cert_days = 0 if expires_after: expires_after_datetime = datetime.datetime.strptime(expires_after, '%Y-%m-%dT%H:%M:%SZ') cert_days = (expires_after_datetime - datetime.datetime.now()).days return cert_days # Populate the value of body[dict_param_name] with the JSON equivalent of # module parameter of param_name if that parameter is present, otherwise leave field # out of resulting dict def convert_module_param_to_json_bool(module, dict_param_name, param_name): body = {} if module.params[param_name] is not None: if module.params[param_name]: body[dict_param_name] = 'true' else: body[dict_param_name] = 'false' return body class EcsCertificate(object): ''' Entrust Certificate Services certificate class. 
''' def __init__(self, module): self.path = module.params['path'] self.full_chain_path = module.params['full_chain_path'] self.force = module.params['force'] self.backup = module.params['backup'] self.request_type = module.params['request_type'] self.csr = module.params['csr'] # All return values self.changed = False self.filename = None self.tracking_id = None self.cert_status = None self.serial_number = None self.cert_days = None self.cert_details = None self.backup_file = None self.backup_full_chain_file = None self.cert = None self.ecs_client = None if self.path and os.path.exists(self.path): try: self.cert = crypto_utils.load_certificate(self.path, backend='cryptography') except Exception as dummy: self.cert = None # Instantiate the ECS client and then try a no-op connection to verify credentials are valid try: self.ecs_client = ECSClient( entrust_api_user=module.params['entrust_api_user'], entrust_api_key=module.params['entrust_api_key'], entrust_api_cert=module.params['entrust_api_client_cert_path'], entrust_api_cert_key=module.params['entrust_api_client_cert_key_path'], entrust_api_specification_path=module.params['entrust_api_specification_path'] ) except SessionConfigurationException as e: module.fail_json(msg='Failed to initialize Entrust Provider: {0}'.format(to_native(e))) try: self.ecs_client.GetAppVersion() except RestOperationException as e: module.fail_json(msg='Please verify credential information. 
Received exception when testing ECS connection: {0}'.format(to_native(e.message))) # Conversion of the fields that go into the 'tracking' parameter of the request object def convert_tracking_params(self, module): body = {} tracking = {} if module.params['requester_name']: tracking['requesterName'] = module.params['requester_name'] if module.params['requester_email']: tracking['requesterEmail'] = module.params['requester_email'] if module.params['requester_phone']: tracking['requesterPhone'] = module.params['requester_phone'] if module.params['tracking_info']: tracking['trackingInfo'] = module.params['tracking_info'] if module.params['custom_fields']: # Omit custom fields from submitted dict if not present, instead of submitting them with value of 'null' # The ECS API does technically accept null without error, but it complicates debugging user escalations and is unnecessary bandwidth. custom_fields = {} for k, v in module.params['custom_fields'].items(): if v is not None: custom_fields[k] = v tracking['customFields'] = custom_fields if module.params['additional_emails']: tracking['additionalEmails'] = module.params['additional_emails'] body['tracking'] = tracking return body def convert_cert_subject_params(self, module): body = {} if module.params['subject_alt_name']: body['subjectAltName'] = module.params['subject_alt_name'] if module.params['org']: body['org'] = module.params['org'] if module.params['ou']: body['ou'] = module.params['ou'] return body def convert_general_params(self, module): body = {} if module.params['eku']: body['eku'] = module.params['eku'] if self.request_type == 'new': body['certType'] = module.params['cert_type'] body['clientId'] = module.params['client_id'] body.update(convert_module_param_to_json_bool(module, 'ctLog', 'ct_log')) body.update(convert_module_param_to_json_bool(module, 'endUserKeyStorageAgreement', 'end_user_key_storage_agreement')) return body def convert_expiry_params(self, module): body = {} if 
module.params['cert_lifetime']: body['certLifetime'] = module.params['cert_lifetime'] elif module.params['cert_expiry']: body['certExpiryDate'] = module.params['cert_expiry'] # If neither cerTLifetime or certExpiryDate was specified and the request type is new, default to 365 days elif self.request_type != 'reissue': gmt_now = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())) expiry = gmt_now + datetime.timedelta(days=365) body['certExpiryDate'] = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z") return body def set_tracking_id_by_serial_number(self, module): try: # Use serial_number to identify if certificate is an Entrust Certificate # with an associated tracking ID serial_number = "{0:X}".format(self.cert.serial_number) cert_results = self.ecs_client.GetCertificates(serialNumber=serial_number).get('certificates', {}) if len(cert_results) == 1: self.tracking_id = cert_results[0].get('trackingId') except RestOperationException as dummy: # If we fail to find a cert by serial number, that's fine, we just don't set self.tracking_id return def set_cert_details(self, module): try: self.cert_details = self.ecs_client.GetCertificate(trackingId=self.tracking_id) self.cert_status = self.cert_details.get('status') self.serial_number = self.cert_details.get('serialNumber') self.cert_days = calculate_cert_days(self.cert_details.get('expiresAfter')) except RestOperationException as e: module.fail_json('Failed to get details of certificate with tracking_id="{0}", Error: '.format(self.tracking_id), to_native(e.message)) def check(self, module): if self.cert: # We will only treat a certificate as valid if it is found as a managed entrust cert. # We will only set updated tracking ID based on certificate in "path" if it is managed by entrust. self.set_tracking_id_by_serial_number(module) if module.params['tracking_id'] and self.tracking_id and module.params['tracking_id'] != self.tracking_id: module.warn('tracking_id parameter of "{0}" provided, but will be ignored. 
Valid certificate was present in path "{1}" with ' 'tracking_id of "{2}".'.format(module.params['tracking_id'], self.path, self.tracking_id)) # If we did not end up setting tracking_id based on existing cert, get from module params if not self.tracking_id: self.tracking_id = module.params['tracking_id'] if not self.tracking_id: return False self.set_cert_details(module) if self.cert_status == 'EXPIRED' or self.cert_status == 'SUSPENDED' or self.cert_status == 'REVOKED': return False if self.cert_days < module.params['remaining_days']: return False return True def request_cert(self, module): if not self.check(module) or self.force: body = {} # Read the CSR contents if self.csr and os.path.exists(self.csr): with open(self.csr, 'r') as csr_file: body['csr'] = csr_file.read() # Check if the path is already a cert # tracking_id may be set as a parameter or by get_cert_details if an entrust cert is in 'path'. If tracking ID is null # We will be performing a reissue operation. if self.request_type != 'new' and not self.tracking_id: module.warn('No existing Entrust certificate found in path={0} and no tracking_id was provided, setting request_type to "new" for this task' 'run. Future playbook runs that point to the pathination file in {1} will use request_type={2}' .format(self.path, self.path, self.request_type)) self.request_type = 'new' elif self.request_type == 'new' and self.tracking_id: module.warn('Existing certificate being acted upon, but request_type is "new", so will be a new certificate issuance rather than a' 'reissue or renew') # Use cases where request type is new and no existing certificate, or where request type is reissue/renew and a valid # existing certificate is found, do not need warnings. 
body.update(self.convert_tracking_params(module)) body.update(self.convert_cert_subject_params(module)) body.update(self.convert_general_params(module)) body.update(self.convert_expiry_params(module)) if not module.check_mode: try: if self.request_type == 'validate_only': body['validateOnly'] = 'true' result = self.ecs_client.NewCertRequest(Body=body) if self.request_type == 'new': result = self.ecs_client.NewCertRequest(Body=body) elif self.request_type == 'renew': result = self.ecs_client.RenewCertRequest(trackingId=self.tracking_id, Body=body) elif self.request_type == 'reissue': result = self.ecs_client.ReissueCertRequest(trackingId=self.tracking_id, Body=body) self.tracking_id = result.get('trackingId') self.set_cert_details(module) except RestOperationException as e: module.fail_json(msg='Failed to request new certificate from Entrust (ECS) {0}'.format(e.message)) if self.request_type != 'validate_only': if self.backup: self.backup_file = module.backup_local(self.path) crypto_utils.write_file(module, to_bytes(self.cert_details.get('endEntityCert'))) if self.full_chain_path: if self.backup: self.backup_full_chain_file = module.backup_local(self.full_chain_path) crypto_utils.write_file(module, to_bytes(self.cert_details.get('chainCerts')), path=self.full_chain_path) self.changed = True # If there is no certificate present in path but a tracking ID was specified, save it to disk elif not os.path.exists(self.path) and self.tracking_id: if not module.check_mode: crypto_utils.write_file(module, to_bytes(self.cert_details.get('endEntityCert'))) if self.full_chain_path: crypto_utils.write_file(module, to_bytes(self.cert_details.get('chainCerts')), path=self.full_chain_path) self.changed = True def dump(self): result = { 'changed': self.changed, 'filename': self.path, 'tracking_id': self.tracking_id, 'cert_status': self.cert_status, 'serial_number': self.serial_number, 'cert_days': self.cert_days, 'cert_details': self.cert_details, } if self.backup_file: 
result['backup_file'] = self.backup_file result['backup_full_chain_file'] = self.backup_full_chain_file return result def custom_fields_spec(): return dict( text1=dict(type='str'), text2=dict(type='str'), text3=dict(type='str'), text4=dict(type='str'), text5=dict(type='str'), text6=dict(type='str'), text7=dict(type='str'), text8=dict(type='str'), text9=dict(type='str'), text10=dict(type='str'), text11=dict(type='str'), text12=dict(type='str'), text13=dict(type='str'), text14=dict(type='str'), text15=dict(type='str'), number1=dict(type='float'), number2=dict(type='float'), number3=dict(type='float'), number4=dict(type='float'), number5=dict(type='float'), date1=dict(type='str'), date2=dict(type='str'), date3=dict(type='str'), date4=dict(type='str'), date5=dict(type='str'), email1=dict(type='str'), email2=dict(type='str'), email3=dict(type='str'), email4=dict(type='str'), email5=dict(type='str'), dropdown1=dict(type='str'), dropdown2=dict(type='str'), dropdown3=dict(type='str'), dropdown4=dict(type='str'), dropdown5=dict(type='str'), ) def ecs_certificate_argument_spec(): return dict( backup=dict(type='bool', default=False), force=dict(type='bool', default=False), path=dict(type='path', required=True), full_chain_path=dict(type='path'), tracking_id=dict(type='int'), remaining_days=dict(type='int', default=30), request_type=dict(type='str', default='new', choices=['new', 'renew', 'reissue', 'validate_only']), cert_type=dict(type='str', choices=['STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CODE_SIGNING', 'EV_CODE_SIGNING', 'CDS_INDIVIDUAL', 'CDS_GROUP', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT', ]), csr=dict(type='str'), subject_alt_name=dict(type='list', elements='str'), eku=dict(type='str', choices=['SERVER_AUTH', 'CLIENT_AUTH', 'SERVER_AND_CLIENT_AUTH']), ct_log=dict(type='bool'), client_id=dict(type='int', default=1), org=dict(type='str'), ou=dict(type='list', elements='str'), 
end_user_key_storage_agreement=dict(type='bool'), tracking_info=dict(type='str'), requester_name=dict(type='str', required=True), requester_email=dict(type='str', required=True), requester_phone=dict(type='str', required=True), additional_emails=dict(type='list', elements='str'), custom_fields=dict(type='dict', default=None, options=custom_fields_spec()), cert_expiry=dict(type='str'), cert_lifetime=dict(type='str', choices=['P1Y', 'P2Y', 'P3Y']), ) def main(): ecs_argument_spec = ecs_client_argument_spec() ecs_argument_spec.update(ecs_certificate_argument_spec()) module = AnsibleModule( argument_spec=ecs_argument_spec, required_if=( ['request_type', 'new', ['cert_type']], ['request_type', 'validate_only', ['cert_type']], ['cert_type', 'CODE_SIGNING', ['end_user_key_storage_agreement']], ['cert_type', 'EV_CODE_SIGNING', ['end_user_key_storage_agreement']], ), mutually_exclusive=( ['cert_expiry', 'cert_lifetime'], ), supports_check_mode=True, ) if not CRYPTOGRAPHY_FOUND or CRYPTOGRAPHY_VERSION < LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION): module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)), exception=CRYPTOGRAPHY_IMP_ERR) # If validate_only is used, pointing to an existing tracking_id is an invalid operation if module.params['tracking_id']: if module.params['request_type'] == 'new' or module.params['request_type'] == 'validate_only': module.fail_json(msg='The tracking_id field is invalid when request_type="{0}".'.format(module.params['request_type'])) # A reissued request can not specify an expiration date or lifetime if module.params['request_type'] == 'reissue': if module.params['cert_expiry']: module.fail_json(msg='The cert_expiry field is invalid when request_type="reissue".') elif module.params['cert_lifetime']: module.fail_json(msg='The cert_lifetime field is invalid when request_type="reissue".') # Only a reissued request can omit the CSR else: module_params_csr = module.params['csr'] if module_params_csr is 
None: module.fail_json(msg='The csr field is required when request_type={0}'.format(module.params['request_type'])) elif not os.path.exists(module_params_csr): module.fail_json(msg='The csr field of {0} was not a valid path. csr is required when request_type={1}'.format( module_params_csr, module.params['request_type'])) if module.params['ou'] and len(module.params['ou']) > 1: module.fail_json(msg='Multiple "ou" values are not currently supported.') if module.params['end_user_key_storage_agreement']: if module.params['cert_type'] != 'CODE_SIGNING' and module.params['cert_type'] != 'EV_CODE_SIGNING': module.fail_json(msg='Parameter "end_user_key_storage_agreement" is valid only for cert_types "CODE_SIGNING" and "EV_CODE_SIGNING"') if module.params['org'] and module.params['client_id'] != 1 and module.params['cert_type'] != 'PD_SSL': module.fail_json(msg='The "org" parameter is not supported when client_id parameter is set to a value other than 1, unless cert_type is "PD_SSL".') if module.params['cert_expiry']: if not validate_cert_expiry(module.params['cert_expiry']): module.fail_json(msg='The "cert_expiry" parameter of "{0}" is not a valid date or date-time'.format(module.params['cert_expiry'])) certificate = EcsCertificate(module) certificate.request_cert(module) result = certificate.dump() module.exit_json(**result) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,738
ecs_certificate chain not in standard format
##### SUMMARY The file contents that end up as the output of ecs_certificates "full_chain_path" are not a valid format for import into, for example, a PKCS12 store. This is the result of the return value of the ECS API being an array, and not properly turning that array into concatenated certificates for the resulting PEM file. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ecs_certificate ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Applies to ansible 2.9.0.dev0 ##### CONFIGURATION N/A affects all configurations ##### OS / ENVIRONMENT N/A affects all OS/environment ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Populate full_chain_path and inspect output ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> A properly formatted PEM certificate chain ##### ACTUAL RESULTS A certificate chain in format ['CERT','CERT']
https://github.com/ansible/ansible/issues/61738
https://github.com/ansible/ansible/pull/61858
cac93cbd1f041eac5045250a6663644cdbba3df8
943888b9553bca40b18c3922f508645d09f53392
2019-09-03T20:47:14Z
python
2019-09-07T05:58:25Z
test/integration/targets/ecs_certificate/tasks/main.yml
--- ## Verify that integration_config was specified - block: - assert: that: - entrust_api_user is defined - entrust_api_key is defined - entrust_api_ip_address is defined - entrust_cloud_ip_address is defined - entrust_api_client_cert_path is defined or entrust_api_client_cert_contents is defined - entrust_api_client_cert_key_path is defined or entrust_api_client_cert_key_contents - cacerts_bundle_path_local is defined ## SET UP TEST ENVIRONMENT ######################################################################## - name: copy the files needed for verifying test server certificate to the host copy: src: '{{ cacerts_bundle_path_local }}/' dest: '{{ cacerts_bundle_path }}' - name: Update the CA certificates for our QA certs (collection may need updating if new QA environments used) command: c_rehash {{ cacerts_bundle_path }} - name: Update hosts file lineinfile: path: /etc/hosts state: present regexp: 'api.entrust.net$' line: '{{ entrust_api_ip_address }} api.entrust.net' - name: Update hosts file lineinfile: path: /etc/hosts state: present regexp: 'cloud.entrust.net$' line: '{{ entrust_cloud_ip_address }} cloud.entrust.net' - name: Clear out the temporary directory for storing the API connection information file: path: '{{ tmpdir_path }}' state: absent - name: Create a directory for storing the API connection Information file: path: '{{ tmpdir_path }}' state: directory - name: Copy the files needed for the connection to entrust API to the host copy: src: '{{ entrust_api_client_cert_path }}' dest: '{{ entrust_api_cert }}' - name: Copy the files needed for the connection to entrust API to the host copy: src: '{{ entrust_api_client_cert_key_path }}' dest: '{{ entrust_api_cert_key }}' ## SETUP CSR TO REQUEST - name: Generate a 2048 bit RSA private key openssl_privatekey: path: '{{ privatekey_path }}' passphrase: '{{ privatekey_passphrase }}' cipher: auto type: RSA size: 2048 - name: Generate a certificate signing request using the generated key openssl_csr: path: 
'{{ csr_path }}' privatekey_path: '{{ privatekey_path }}' privatekey_passphrase: '{{ privatekey_passphrase }}' common_name: '{{ common_name }}' organization_name: '{{ organization_name | default(omit) }}' organizational_unit_name: '{{ organizational_unit_name | default(omit) }}' country_name: '{{ country_name | default(omit) }}' state_or_province_name: '{{ state_or_province_name | default(omit) }}' digest: sha256 - block: - name: Have ECS generate a signed certificate ecs_certificate: backup: True path: '{{ example1_cert_path }}' full_chain_path: '{{ example1_chain_path }}' csr: '{{ csr_path }}' cert_type: '{{ example1_cert_type }}' requester_name: '{{ entrust_requester_name }}' requester_email: '{{ entrust_requester_email }}' requester_phone: '{{ entrust_requester_phone }}' entrust_api_user: '{{ entrust_api_user }}' entrust_api_key: '{{ entrust_api_key }}' entrust_api_client_cert_path: '{{ entrust_api_cert }}' entrust_api_client_cert_key_path: '{{ entrust_api_cert_key }}' register: example1_result - assert: that: - example1_result is not failed - example1_result.changed - example1_result.tracking_id > 0 - example1_result.serial_number is string # Internal CA refuses to issue certificates with the same DN in a short time frame - name: Sleep for 5 seconds so we don't run into duplicate-request errors pause: seconds: 5 - name: Attempt to have ECS generate a signed certificate, but existing one is valid ecs_certificate: backup: True path: '{{ example1_cert_path }}' full_chain_path: '{{ example1_chain_path }}' csr: '{{ csr_path }}' cert_type: '{{ example1_cert_type }}' requester_name: '{{ entrust_requester_name }}' requester_email: '{{ entrust_requester_email }}' requester_phone: '{{ entrust_requester_phone }}' entrust_api_user: '{{ entrust_api_user }}' entrust_api_key: '{{ entrust_api_key }}' entrust_api_client_cert_path: '{{ entrust_api_cert }}' entrust_api_client_cert_key_path: '{{ entrust_api_cert_key }}' register: example2_result - assert: that: - example2_result 
is not failed - not example2_result.changed - example2_result.backup_file is undefined - example2_result.backup_full_chain_file is undefined - example2_result.serial_number == example1_result.serial_number - example2_result.tracking_id == example1_result.tracking_id # Internal CA refuses to issue certificates with the same DN in a short time frame - name: Sleep for 5 seconds so we don't run into duplicate-request errors pause: seconds: 5 - name: Force a reissue with no CSR, verify that contents changed ecs_certificate: backup: True force: True path: '{{ example1_cert_path }}' full_chain_path: '{{ example1_chain_path }}' cert_type: '{{ example1_cert_type }}' request_type: reissue requester_name: '{{ entrust_requester_name }}' requester_email: '{{ entrust_requester_email }}' requester_phone: '{{ entrust_requester_phone }}' entrust_api_user: '{{ entrust_api_user }}' entrust_api_key: '{{ entrust_api_key }}' entrust_api_client_cert_path: '{{ entrust_api_cert }}' entrust_api_client_cert_key_path: '{{ entrust_api_cert_key }}' register: example3_result - assert: that: - example3_result is not failed - example3_result.changed - example3_result.backup_file is string - example3_result.backup_full_chain_file is string - example3_result.tracking_id > 0 - example3_result.tracking_id != example1_result.tracking_id - example3_result.serial_number != example1_result.serial_number # Internal CA refuses to issue certificates with the same DN in a short time frame - name: Sleep for 5 seconds so we don't run into duplicate-request errors pause: seconds: 5 - name: Test a request with all of the various optional possible fields populated ecs_certificate: path: '{{ example4_cert_path }}' csr: '{{ csr_path }}' subject_alt_name: '{{ example4_subject_alt_name }}' eku: '{{ example4_eku }}' ct_log: True cert_type: '{{ example4_cert_type }}' org: '{{ example4_org }}' ou: '{{ example4_ou }}' tracking_info: '{{ example4_tracking_info }}' additional_emails: '{{ example4_additional_emails }}' 
custom_fields: '{{ example4_custom_fields }}' cert_expiry: '{{ example4_cert_expiry }}' requester_name: '{{ entrust_requester_name }}' requester_email: '{{ entrust_requester_email }}' requester_phone: '{{ entrust_requester_phone }}' entrust_api_user: '{{ entrust_api_user }}' entrust_api_key: '{{ entrust_api_key }}' entrust_api_client_cert_path: '{{ entrust_api_cert }}' entrust_api_client_cert_key_path: '{{ entrust_api_cert_key }}' register: example4_result - assert: that: - example4_result is not failed - example4_result.changed - example4_result.backup_file is undefined - example4_result.backup_full_chain_file is undefined - example4_result.tracking_id > 0 - example4_result.serial_number is string always: - name: clean-up temporary folder file: path: '{{ tmpdir_path }}' state: absent
closed
ansible/ansible
https://github.com/ansible/ansible
61,738
ecs_certificate chain not in standard format
##### SUMMARY The file contents that end up as the output of ecs_certificates "full_chain_path" are not a valid format for import into, for example, a PKCS12 store. This is the result of the return value of the ECS API being an array, and not properly turning that array into concatenated certificates for the resulting PEM file. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ecs_certificate ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Applies to ansible 2.9.0.dev0 ##### CONFIGURATION N/A affects all configurations ##### OS / ENVIRONMENT N/A affects all OS/environment ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Populate full_chain_path and inspect output ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> A properly formatted PEM certificate chain ##### ACTUAL RESULTS A certificate chain in format ['CERT','CERT']
https://github.com/ansible/ansible/issues/61738
https://github.com/ansible/ansible/pull/61858
cac93cbd1f041eac5045250a6663644cdbba3df8
943888b9553bca40b18c3922f508645d09f53392
2019-09-03T20:47:14Z
python
2019-09-07T05:58:25Z
test/integration/targets/ecs_certificate/vars/main.yml
--- # vars file for test_ecs_certificate # Path on various hosts that cacerts need to be put as a prerequisite to API server cert validation. # May need to be customized for some environments based on SSL implementations # that ansible "urls" module utility is using as a backing. cacerts_bundle_path: /etc/pki/tls/certs common_name: '{{ ansible_date_time.epoch }}.ansint.testcertificates.com' organization_name: CMS API, Inc. organizational_unit_name: RSA country_name: US state_or_province_name: MA privatekey_passphrase: Passphrase452! tmpdir_path: /tmp/ecs_cert_test/{{ ansible_date_time.epoch }} privatekey_path: '{{ tmpdir_path }}/testcertificates.key' entrust_api_cert: '{{ tmpdir_path }}/authcert.cer' entrust_api_cert_key: '{{ tmpdir_path }}/authkey.cer' csr_path: '{{ tmpdir_path }}/request.csr' entrust_requester_name: C Trufan entrust_requester_email: [email protected] entrust_requester_phone: 1-555-555-5555 # e.g. 15555555555 # TEST 1 example1_cert_path: '{{ tmpdir_path }}/issuedcert_1.pem' example1_chain_path: '{{ tmpdir_path }}/issuedcert_1_chain.pem' example1_cert_type: EV_SSL example4_cert_path: '{{ tmpdir_path }}/issuedcert_2.pem' example4_subject_alt_name: - ansible.testcertificates.com - www.testcertificates.com example4_eku: SERVER_AND_CLIENT_AUTH example4_cert_type: UC_SSL # Test a secondary org and special characters example4_org: Cañon City, Inc. example4_ou: - StringrsaString example4_tracking_info: Submitted via Ansible Integration example4_additional_emails: - [email protected] - [email protected] example4_custom_fields: text1: Admin text2: Invoice 25 number1: 342 date3: '2018-01-01' email2: [email protected] dropdown2: Dropdown 2 Value 1 example4_cert_expiry: 2020-08-15
closed
ansible/ansible
https://github.com/ansible/ansible
61,698
Impossible to omit or pass a default value to published_ports on docker_container
##### SUMMARY Hi, When you want to pass an empty list or a null/None value to published_ports on docker_container it's actually impossible, so you get this error ``` File \"/tmp/ansible_docker_container_payload_ZOZc5l/__main__.py\", line 1448, in _parse_publish_ports\r\n File \"/tmp/ansible_docker_container_payload_ZOZc5l/__main__.py\", line 1003, in parse_port_range\r\nValueError: invalid literal for int() with base 10: ''\r\n", ``` Ref #61602 The problem is that we parse via `_parse_publish_ports` the parameter if self.published_ports is not None or not 'all' Also after that there is no control on each elements of the list, (not like for the volumes parameter by example) So I propose to patch line 1151 `self.published_ports = self._parse_publish_ports() ` to `self.published_ports = self._parse_publish_ports() if self.published_ports else None` Best regards ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * docker_container ##### ANSIBLE VERSION ``` ansible 2.8.4 ```
https://github.com/ansible/ansible/issues/61698
https://github.com/ansible/ansible/pull/61740
943888b9553bca40b18c3922f508645d09f53392
d40ba28fb43a15eac0c604647e6535fb8fba4939
2019-09-03T07:53:49Z
python
2019-09-08T18:30:18Z
changelogs/fragments/61740-docker_container-port-range-parsing.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,698
Impossible to omit or pass a default value to published_ports on docker_container
##### SUMMARY Hi, When you want to pass an empty list or a null/None value to published_ports on docker_container it's actually impossible, so you get this error ``` File \"/tmp/ansible_docker_container_payload_ZOZc5l/__main__.py\", line 1448, in _parse_publish_ports\r\n File \"/tmp/ansible_docker_container_payload_ZOZc5l/__main__.py\", line 1003, in parse_port_range\r\nValueError: invalid literal for int() with base 10: ''\r\n", ``` Ref #61602 The problem is that we parse via `_parse_publish_ports` the parameter if self.published_ports is not None or not 'all' Also after that there is no control on each elements of the list, (not like for the volumes parameter by example) So I propose to patch line 1151 `self.published_ports = self._parse_publish_ports() ` to `self.published_ports = self._parse_publish_ports() if self.published_ports else None` Best regards ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME * docker_container ##### ANSIBLE VERSION ``` ansible 2.8.4 ```
https://github.com/ansible/ansible/issues/61698
https://github.com/ansible/ansible/pull/61740
943888b9553bca40b18c3922f508645d09f53392
d40ba28fb43a15eac0c604647e6535fb8fba4939
2019-09-03T07:53:49Z
python
2019-09-08T18:30:18Z
lib/ansible/modules/cloud/docker/docker_container.py
#!/usr/bin/python # # Copyright 2016 Red Hat | Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: docker_container short_description: manage docker containers description: - Manage the life cycle of docker containers. - Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken. version_added: "2.1" options: auto_remove: description: - enable auto-removal of the container on daemon side when the container's process exits type: bool default: no version_added: "2.4" blkio_weight: description: - Block IO (relative weight), between 10 and 1000. type: int capabilities: description: - List of capabilities to add to the container. type: list cap_drop: description: - List of capabilities to drop from the container. type: list version_added: "2.7" cleanup: description: - Use with I(detach=false) to remove the container after successful execution. type: bool default: no version_added: "2.2" command: description: - Command to execute when the container starts. A command may be either a string or a list. - Prior to version 2.4, strings were split on commas. type: raw comparisons: description: - Allows to specify how properties of existing containers are compared with module options to decide whether the container should be recreated / updated or not. Only options which correspond to the state of a container as handled by the Docker daemon can be specified, as well as C(networks). - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore) and C(allow_more_present). - If C(strict) is specified, values are tested for equality, and changes always result in updating or restarting. If C(ignore) is specified, changes are ignored. 
- C(allow_more_present) is allowed only for lists, sets and dicts. If it is specified for lists or sets, the container will only be updated or restarted if the module option contains a value which is not present in the container's options. If the option is specified for a dict, the container will only be updated or restarted if the module option contains a key which isn't present in the container's option, or if the value of a key present differs. - The wildcard option C(*) can be used to set one of the default values C(strict) or C(ignore) to I(all) comparisons. - See the examples for details. type: dict version_added: "2.8" cpu_period: description: - Limit CPU CFS (Completely Fair Scheduler) period type: int cpu_quota: description: - Limit CPU CFS (Completely Fair Scheduler) quota type: int cpuset_cpus: description: - CPUs in which to allow execution C(1,3) or C(1-3). type: str cpuset_mems: description: - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1) type: str cpu_shares: description: - CPU shares (relative weight). type: int detach: description: - Enable detached mode to leave the container running in background. If disabled, the task will reflect the status of the container run (failed if the command failed). type: bool default: yes devices: description: - "List of host device bindings to add to the container. Each binding is a mapping expressed in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>" type: list device_read_bps: description: - "List of device path and read rate (bytes per second) from device." type: list suboptions: path: description: - Device path in the container. type: str required: yes rate: description: - "Device read limit. Format: <number>[<unit>]" - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)" - "Omitting the unit defaults to bytes." 
type: str required: yes version_added: "2.8" device_write_bps: description: - "List of device and write rate (bytes per second) to device." type: list suboptions: path: description: - Device path in the container. type: str required: yes rate: description: - "Device read limit. Format: <number>[<unit>]" - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)" - "Omitting the unit defaults to bytes." type: str required: yes version_added: "2.8" device_read_iops: description: - "List of device and read rate (IO per second) from device." type: list suboptions: path: description: - Device path in the container. type: str required: yes rate: description: - "Device read limit." - "Must be a positive integer." type: int required: yes version_added: "2.8" device_write_iops: description: - "List of device and write rate (IO per second) to device." type: list suboptions: path: description: - Device path in the container. type: str required: yes rate: description: - "Device read limit." - "Must be a positive integer." type: int required: yes version_added: "2.8" dns_opts: description: - list of DNS options type: list dns_servers: description: - List of custom DNS servers. type: list dns_search_domains: description: - List of custom DNS search domains. type: list domainname: description: - Container domainname. type: str version_added: "2.5" env: description: - Dictionary of key,value pairs. - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss. type: dict env_file: description: - Path to a file, present on the target, containing environment variables I(FOO=BAR). - If variable also present in C(env), then C(env) value will override. type: path version_added: "2.2" entrypoint: description: - Command that overwrites the default ENTRYPOINT of the image. 
type: list etc_hosts: description: - Dict of host-to-IP mappings, where each host name is a key in the dictionary. Each host name will be added to the container's /etc/hosts file. type: dict exposed_ports: description: - List of additional container ports which informs Docker that the container listens on the specified network ports at runtime. If the port is already exposed using EXPOSE in a Dockerfile, it does not need to be exposed again. type: list aliases: - exposed - expose force_kill: description: - Use the kill command when stopping a running container. type: bool default: no aliases: - forcekill groups: description: - List of additional group names and/or IDs that the container process will run as. type: list healthcheck: description: - 'Configure a check that is run to determine whether or not containers for this service are "healthy". See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) for details on how healthchecks work.' - 'I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)' type: dict suboptions: test: description: - Command to run to check health. - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL). type: raw interval: description: - 'Time between running the check. (default: 30s)' type: str timeout: description: - 'Maximum time to allow one check to run. (default: 30s)' type: str retries: description: - 'Consecutive failures needed to report unhealthy. It accept integer value. (default: 3)' type: int start_period: description: - 'Start period for the container to initialize before starting health-retries countdown. (default: 0s)' type: str version_added: "2.8" hostname: description: - Container hostname. 
type: str ignore_image: description: - When C(state) is I(present) or I(started) the module compares the configuration of an existing container to requested configuration. The evaluation includes the image version. If the image version in the registry does not match the container, the container will be recreated. Stop this behavior by setting C(ignore_image) to I(True). - I(Warning:) This option is ignored if C(image) or C(*) is used for the C(comparisons) option. type: bool default: no version_added: "2.2" image: description: - Repository path and tag used to create the container. If an image is not found or pull is true, the image will be pulled from the registry. If no tag is included, C(latest) will be used. - Can also be an image ID. If this is the case, the image is assumed to be available locally. The C(pull) option is ignored for this case. type: str init: description: - Run an init inside the container that forwards signals and reaps processes. This option requires Docker API >= 1.25. type: bool default: no version_added: "2.6" interactive: description: - Keep stdin open after a container is launched, even if not attached. type: bool default: no ipc_mode: description: - Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another container's IPC namespace or 'host' to use the host's IPC namespace within the container. type: str keep_volumes: description: - Retain volumes associated with a removed container. type: bool default: yes kill_signal: description: - Override default signal used to kill a running container. type: str kernel_memory: description: - "Kernel memory limit (format: C(<number>[<unit>])). Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)." - Omitting the unit defaults to bytes. type: str labels: description: - Dictionary of key value pairs. 
type: dict links: description: - List of name aliases for linked containers in the format C(container_name:alias). - Setting this will force container to be restarted. type: list log_driver: description: - Specify the logging driver. Docker uses I(json-file) by default. - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices. type: str log_options: description: - Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/ for details. type: dict aliases: - log_opt mac_address: description: - Container MAC address (e.g. 92:d0:c6:0a:29:33) type: str memory: description: - "Memory limit (format: C(<number>[<unit>])). Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." - Omitting the unit defaults to bytes. type: str default: '0' memory_reservation: description: - "Memory soft limit (format: C(<number>[<unit>])). Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." - Omitting the unit defaults to bytes. type: str memory_swap: description: - "Total memory limit (memory + swap, format: C(<number>[<unit>])). Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." - Omitting the unit defaults to bytes. type: str memory_swappiness: description: - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - If not set, the value will be remain the same if container exists and will be inherited from the host machine if it is (re-)created. type: int mounts: version_added: "2.9" type: list description: - 'Specification for mounts to be added to the container. More powerful alternative to I(volumes).' suboptions: target: description: - Path inside the container. 
type: str required: true source: description: - Mount source (e.g. a volume name or a host path). type: str type: description: - The mount type. - Note that C(npipe) is only supported by Docker for Windows. type: str choices: - 'bind' - 'volume' - 'tmpfs' - 'npipe' default: volume read_only: description: - 'Whether the mount should be read-only.' type: bool consistency: description: - 'The consistency requirement for the mount.' type: str choices: - 'default' - 'consistent' - 'cached' - 'delegated' propagation: description: - Propagation mode. Only valid for the C(bind) type. type: str choices: - 'private' - 'rprivate' - 'shared' - 'rshared' - 'slave' - 'rslave' no_copy: description: - False if the volume should be populated with the data from the target. Only valid for the C(volume) type. - The default value is C(false). type: bool labels: description: - User-defined name and labels for the volume. Only valid for the C(volume) type. type: dict volume_driver: description: - Specify the volume driver. Only valid for the C(volume) type. - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. type: str volume_options: description: - Dictionary of options specific to the chosen volume_driver. See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. type: dict tmpfs_size: description: - "The size for the tmpfs mount in bytes. Format: <number>[<unit>]" - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)" - "Omitting the unit defaults to bytes." type: str tmpfs_mode: description: - The permission mode for the tmpfs mount. type: str name: description: - Assign a name to a new container or match an existing container. - When identifying an existing container name may be a name or a long or short container ID. type: str required: yes network_mode: description: - Connect the container to a network. 
Choices are "bridge", "host", "none" or "container:<name|id>" type: str userns_mode: description: - Set the user namespace mode for the container. Currently, the only valid value is C(host). type: str version_added: "2.5" networks: description: - List of networks the container belongs to. - For examples of the data structure and usage see EXAMPLES below. - To remove a container from one or more networks, use the C(purge_networks) option. - Note that as opposed to C(docker run ...), M(docker_container) does not remove the default network if C(networks) is specified. You need to explicity use C(purge_networks) to enforce the removal of the default network (and all other networks not explicitly mentioned in C(networks)). type: list suboptions: name: description: - The network's name. type: str required: yes ipv4_address: description: - The container's IPv4 address in this network. type: str ipv6_address: description: - The container's IPv6 address in this network. type: str links: description: - A list of containers to link to. type: list aliases: description: - List of aliases for this container in this network. These names can be used in the network to reach this container. type: list version_added: "2.2" networks_cli_compatible: description: - "When networks are provided to the module via the I(networks) option, the module behaves differently than C(docker run --network): C(docker run --network other) will create a container with network C(other) attached, but the default network not attached. This module with C(networks: {name: other}) will create a container with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes), the C(default) network will be removed afterwards." - "If I(networks_cli_compatible) is set to C(yes), this module will behave as C(docker run --network) and will I(not) add the default network if C(networks) is specified. If C(networks) is not specified, the default network will be attached." 
- "Note that docker CLI also sets C(network_mode) to the name of the first network added if C(--network) is specified. For more compatibility with docker CLI, you explicitly have to set C(network_mode) to the name of the first network you're adding." - Current value is C(no). A new default of C(yes) will be set in Ansible 2.12. type: bool version_added: "2.8" oom_killer: description: - Whether or not to disable OOM Killer for the container. type: bool oom_score_adj: description: - An integer value containing the score given to the container in order to tune OOM killer preferences. type: int version_added: "2.2" output_logs: description: - If set to true, output of the container command will be printed (only effective when log_driver is set to json-file or journald. type: bool default: no version_added: "2.7" paused: description: - Use with the started state to pause running processes inside the container. type: bool default: no pid_mode: description: - Set the PID namespace mode for the container. - Note that Docker SDK for Python < 2.0 only supports 'host'. Newer versions of the Docker SDK for Python (docker) allow all values supported by the docker daemon. type: str pids_limit: description: - Set PIDs limit for the container. It accepts an integer value. - Set -1 for unlimited PIDs. type: int version_added: "2.8" privileged: description: - Give extended privileges to the container. type: bool default: no published_ports: description: - List of ports to publish from the container to the host. - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host interface." - Port ranges can be used for source and destination ports. If two ranges with different lengths are specified, the shorter range will be used. - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are I(not) allowed. This is different from the C(docker) command line utility. 
Use the L(dig lookup,../lookup/dig.html) to resolve hostnames." - A value of C(all) will publish all exposed container ports to random host ports, ignoring any other mappings. - If C(networks) parameter is provided, will inspect each network to see if there exists a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4. If such a network is found, then published ports where no host IP address is specified will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4. Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4 value encountered in the list of C(networks) is the one that will be used. type: list aliases: - ports pull: description: - If true, always pull the latest version of an image. Otherwise, will only pull an image when missing. - I(Note) that images are only pulled when specified by name. If the image is specified as a image ID (hash), it cannot be pulled. type: bool default: no purge_networks: description: - Remove the container from ALL networks not included in C(networks) parameter. - Any default networks such as I(bridge), if not found in C(networks), will be removed as well. type: bool default: no version_added: "2.2" read_only: description: - Mount the container's root file system as read-only. type: bool default: no recreate: description: - Use with present and started states to force the re-creation of an existing container. type: bool default: no restart: description: - Use with started state to force a matching container to be stopped and restarted. type: bool default: no restart_policy: description: - Container restart policy. Place quotes around I(no) option. type: str choices: - 'no' - 'on-failure' - 'always' - 'unless-stopped' restart_retries: description: - Use with restart policy to control maximum number of restart attempts. type: int runtime: description: - Runtime to use for the container. 
type: str version_added: "2.8" shm_size: description: - "Size of C(/dev/shm) (format: C(<number>[<unit>])). Number is positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." - Omitting the unit defaults to bytes. If you omit the size entirely, the system uses C(64M). type: str security_opts: description: - List of security options in the form of C("label:user:User") type: list state: description: - 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.' - 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no container matches the name, a container will be created. If a container matches the name but the provided configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created with the requested config. Image version will be taken into account when comparing configuration. To ignore image version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed container.' - 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container matches the name, a container will be created and started. If a container matching the name is found but the configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and a new container will be created with the requested configuration and started. Image version will be taken into account when comparing configuration. To ignore image version use the ignore_image option. 
Use recreate to always re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated with a removed container.' - 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped state. Use force_kill to kill a container rather than stopping it.' type: str default: started choices: - absent - present - stopped - started stop_signal: description: - Override default signal used to stop the container. type: str stop_timeout: description: - Number of seconds to wait for the container to stop before sending SIGKILL. When the container is created by this module, its C(StopTimeout) configuration will be set to this value. - When the container is stopped, will be used as a timeout for stopping the container. In case the container has a custom C(StopTimeout) configuration, the behavior depends on the version of the docker daemon. New versions of the docker daemon will always use the container's configured C(StopTimeout) value if it has been configured. type: int trust_image_content: description: - If C(yes), skip image verification. type: bool default: no tmpfs: description: - Mount a tmpfs directory type: list version_added: 2.4 tty: description: - Allocate a pseudo-TTY. type: bool default: no ulimits: description: - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)" type: list sysctls: description: - Dictionary of key,value pairs. type: dict version_added: 2.4 user: description: - Sets the username or UID used and optionally the groupname or GID for the specified command. - "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]" type: str uts: description: - Set the UTS namespace mode for the container. type: str volumes: description: - List of volumes to mount within the container. 
- "Use docker CLI-style syntax: C(/host:/container[:mode])" - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent), C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes." - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume. - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw), C(z), and C(Z)." type: list volume_driver: description: - The container volume driver. type: str volumes_from: description: - List of container names or Ids to get volumes from. type: list working_dir: description: - Path to the working directory. type: str version_added: "2.4" extends_documentation_fragment: - docker - docker.docker_py_1_documentation author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" - "Pavel Antonov (@softzilla)" - "Thomas Steinbach (@ThomasSteinbach)" - "Philippe Jandot (@zfil)" - "Daan Oosterveld (@dusdanig)" - "Chris Houseknecht (@chouseknecht)" - "Kassian Sun (@kassiansun)" - "Felix Fontein (@felixfontein)" requirements: - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)" - "Docker API >= 1.20" ''' EXAMPLES = ''' - name: Create a data container docker_container: name: mydata image: busybox volumes: - /data - name: Re-create a redis container docker_container: name: myredis image: redis command: redis-server --appendonly yes state: present recreate: yes exposed_ports: - 6379 volumes_from: - mydata - name: Restart a container docker_container: name: myapplication image: someuser/appimage state: started restart: yes links: - "myredis:aliasedredis" devices: - "/dev/sda:/dev/xvda:rwm" ports: - "8080:9000" - "127.0.0.1:8081:9001/udp" env: SECRET_KEY: "ssssh" # Values which might be 
parsed as numbers, booleans or other types by the YAML parser need to be quoted BOOLEAN_KEY: "yes" - name: Container present docker_container: name: mycontainer state: present image: ubuntu:14.04 command: sleep infinity - name: Stop a container docker_container: name: mycontainer state: stopped - name: Start 4 load-balanced containers docker_container: name: "container{{ item }}" recreate: yes image: someuser/anotherappimage command: sleep 1d with_sequence: count=4 - name: remove container docker_container: name: ohno state: absent - name: Syslogging output docker_container: name: myservice image: busybox log_driver: syslog log_options: syslog-address: tcp://my-syslog-server:514 syslog-facility: daemon # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for # older docker installs, use "syslog-tag" instead tag: myservice - name: Create db container and connect to network docker_container: name: db_test image: "postgres:latest" networks: - name: "{{ docker_network_name }}" - name: Start container, connect to network and link docker_container: name: sleeper image: ubuntu:14.04 networks: - name: TestingNet ipv4_address: "172.1.1.100" aliases: - sleepyzz links: - db_test:db - name: TestingNet2 - name: Start a container with a command docker_container: name: sleepy image: ubuntu:14.04 command: ["sleep", "infinity"] - name: Add container to networks docker_container: name: sleepy networks: - name: TestingNet ipv4_address: 172.1.1.18 links: - sleeper - name: TestingNet2 ipv4_address: 172.1.10.20 - name: Update network with aliases docker_container: name: sleepy networks: - name: TestingNet aliases: - sleepyz - zzzz - name: Remove container from one network docker_container: name: sleepy networks: - name: TestingNet2 purge_networks: yes - name: Remove container from all networks docker_container: name: sleepy purge_networks: yes - name: Start a container and use an env file docker_container: name: agent image: jenkinsci/ssh-slave env_file: 
/var/tmp/jenkins/agent.env - name: Create a container with limited capabilities docker_container: name: sleepy image: ubuntu:16.04 command: sleep infinity capabilities: - sys_time cap_drop: - all - name: Finer container restart/update control docker_container: name: test image: ubuntu:18.04 env: arg1: "true" arg2: "whatever" volumes: - /tmp:/tmp comparisons: image: ignore # don't restart containers with older versions of the image env: strict # we want precisely this environment volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there - name: Finer container restart/update control II docker_container: name: test image: ubuntu:18.04 env: arg1: "true" arg2: "whatever" comparisons: '*': ignore # by default, ignore *all* options (including image) env: strict # except for environment variables; there, we want to be strict - name: Start container with healthstatus docker_container: name: nginx-proxy image: nginx:1.13 state: started healthcheck: # Check if nginx server is healthy by curl'ing the server. # If this fails or timeouts, the healthcheck fails. test: ["CMD", "curl", "--fail", "http://nginx.host.com"] interval: 1m30s timeout: 10s retries: 3 start_period: 30s - name: Remove healthcheck from container docker_container: name: nginx-proxy image: nginx:1.13 state: started healthcheck: # The "NONE" check needs to be specified test: ["NONE"] - name: start container with block device read limit docker_container: name: test image: ubuntu:18.04 state: started device_read_bps: # Limit read rate for /dev/sda to 20 mebibytes per second - path: /dev/sda rate: 20M device_read_iops: # Limit read rate for /dev/sdb to 300 IO per second - path: /dev/sdb rate: 300 ''' RETURN = ''' container: description: - Facts representing the current state of the container. Matches the docker inspection output. - Note that facts are part of the registered vars since Ansible 2.8. 
For compatibility reasons, the facts are also accessible directly as C(docker_container). Note that the returned fact will be removed in Ansible 2.12. - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to conflicts with the connection plugin. - Empty if C(state) is I(absent) - If detached is I(False), will include Output attribute containing any output from container run. returned: always type: dict sample: '{ "AppArmorProfile": "", "Args": [], "Config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/usr/bin/supervisord" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": { "443/tcp": {}, "80/tcp": {} }, "Hostname": "8e47bf643eb9", "Image": "lnmp_nginx:v1", "Labels": {}, "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": { "/tmp/lnmp/nginx-sites/logs/": {} }, ... }' ''' import os import re import shlex import traceback from distutils.version import LooseVersion from ansible.module_utils.common.text.formatters import human_to_bytes from ansible.module_utils.docker.common import ( AnsibleDockerClient, DifferenceTracker, DockerBaseClass, compare_generic, is_image_name_id, sanitize_result, clean_dict_booleans_for_docker_api, omit_none_from_dict, parse_healthcheck, DOCKER_COMMON_ARGS, RequestException, ) from ansible.module_utils.six import string_types try: from docker import utils from ansible.module_utils.docker.common import docker_version if LooseVersion(docker_version) >= LooseVersion('1.10.0'): from docker.types import Ulimit, LogConfig from docker import types as docker_types else: from docker.utils.types import Ulimit, LogConfig from docker.errors import DockerException, APIError, NotFound except Exception: # missing Docker SDK for Python handled in ansible.module_utils.docker.common pass REQUIRES_CONVERSION_TO_BYTES = [ 'kernel_memory', 'memory', 
    'memory_reservation',
    'memory_swap',
    'shm_size'
]


def is_volume_permissions(mode):
    """Return True if every comma-separated flag in a volume mode string is a mode the docker daemon understands."""
    for part in mode.split(','):
        if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
            return False
    return True


def parse_port_range(range_or_port, client):
    '''
    Parses a string containing either a single port or a range of ports.

    Returns a list of integers for each port in the list.
    '''
    if '-' in range_or_port:
        # A range like "8000-8010" expands to every port in the inclusive range.
        start, end = [int(port) for port in range_or_port.split('-')]
        if end < start:
            client.fail('Invalid port range: {0}'.format(range_or_port))
        return list(range(start, end + 1))
    else:
        return [int(range_or_port)]


def split_colon_ipv6(text, client):
    '''
    Split string by ':', while keeping IPv6 addresses in square brackets in one component.
    '''
    if '[' not in text:
        return text.split(':')
    start = 0
    result = []
    while start < len(text):
        i = text.find('[', start)
        if i < 0:
            # No more bracketed groups; split the remainder normally.
            result.extend(text[start:].split(':'))
            break
        j = text.find(']', i)
        if j < 0:
            client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
        # Split everything before the '[', then glue the bracketed address
        # (and anything up to the next ':') onto the last component.
        result.extend(text[start:i].split(':'))
        k = text.find(':', j)
        if k < 0:
            result[-1] += text[i:]
            start = len(text)
        else:
            result[-1] += text[i:k]
            if k == len(text):
                # Trailing ':' yields an empty final component.
                result.append('')
                break
            start = k + 1
    return result


class TaskParameters(DockerBaseClass):
    '''
    Access and parse module parameters
    '''

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        # Declare every supported module option so that attribute access is
        # safe even before the setattr() loop below copies the actual values.
        self.auto_remove = None
        self.blkio_weight = None
        self.capabilities = None
        self.cap_drop = None
        self.cleanup = None
        self.command = None
        self.cpu_period = None
        self.cpu_quota = None
        self.cpuset_cpus = None
        self.cpuset_mems = None
        self.cpu_shares = None
        self.detach = None
        self.debug = None
        self.devices = None
        self.device_read_bps = None
        self.device_write_bps = None
        self.device_read_iops = None
        self.device_write_iops = None
        self.dns_servers = None
        self.dns_opts = None
            self.groups = [str(g) for g in self.groups]

        # Options measured in bytes accept human-readable sizes ("1G");
        # normalize them to integers up front.
        for param_name in REQUIRES_CONVERSION_TO_BYTES:
            if client.module.params.get(param_name):
                try:
                    setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
                except ValueError as exc:
                    self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))

        self.publish_all_ports = False
        self.published_ports = self._parse_publish_ports()
        if self.published_ports in ('all', 'ALL'):
            # 'all' is a sentinel, not a real port mapping.
            self.publish_all_ports = True
            self.published_ports = None

        self.ports = self._parse_exposed_ports(self.published_ports)
        self.log("expose ports:")
        self.log(self.ports, pretty_print=True)

        self.links = self._parse_links(self.links)

        if self.volumes:
            self.volumes = self._expand_host_paths()

        self.tmpfs = self._parse_tmpfs()
        self.env = self._get_environment()
        self.ulimits = self._parse_ulimits()
        self.sysctls = self._parse_sysctls()
        self.log_config = self._parse_log_config()
        try:
            self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
        except ValueError as e:
            self.fail(str(e))

        self.exp_links = None
        self.volume_binds = self._get_volume_binds(self.volumes)
        # pid/ipc/network modes may reference another container by name;
        # resolve those names to container IDs.
        self.pid_mode = self._replace_container_names(self.pid_mode)
        self.ipc_mode = self._replace_container_names(self.ipc_mode)
        self.network_mode = self._replace_container_names(self.network_mode)

        self.log("volumes:")
        self.log(self.volumes, pretty_print=True)
        self.log("volume binds:")
        self.log(self.volume_binds, pretty_print=True)

        if self.networks:
            for network in self.networks:
                network['id'] = self._get_network_id(network['name'])
                if not network['id']:
                    self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
                if network.get('links'):
                    network['links'] = self._parse_links(network['links'])

        if self.mac_address:
            # Ensure the MAC address uses colons instead of hyphens for later comparison
            self.mac_address = self.mac_address.replace('-', ':')

        if self.entrypoint:
            # convert from list to str.
            self.entrypoint = ' '.join([str(x) for x in self.entrypoint])

        if self.command:
            # convert from list to str
            if isinstance(self.command, list):
                self.command = ' '.join([str(x) for x in self.command])

        self.mounts_opt, self.expected_mounts = self._process_mounts()

        self._check_mount_target_collisions()

        for param_name in ["device_read_bps", "device_write_bps"]:
            if client.module.params.get(param_name):
                self._process_rate_bps(option=param_name)

        for param_name in ["device_read_iops", "device_write_iops"]:
            if client.module.params.get(param_name):
                self._process_rate_iops(option=param_name)

    def fail(self, msg):
        # Delegate failure reporting to the docker client wrapper.
        self.client.fail(msg)

    @property
    def update_parameters(self):
        '''
        Returns parameters used to update a container
        '''
        # Maps Docker SDK keyword -> module option name.
        update_parameters = dict(
            blkio_weight='blkio_weight',
            cpu_period='cpu_period',
            cpu_quota='cpu_quota',
            cpu_shares='cpu_shares',
            cpuset_cpus='cpuset_cpus',
            cpuset_mems='cpuset_mems',
            mem_limit='memory',
            mem_reservation='memory_reservation',
            memswap_limit='memory_swap',
            kernel_memory='kernel_memory',
        )
        result = dict()
        for key, value in update_parameters.items():
            if getattr(self, value, None) is not None:
                # Only pass options supported by the installed SDK/API version.
                if self.client.option_minimal_versions[value]['supported']:
                    result[key] = getattr(self, value)
        return result

    @property
    def create_parameters(self):
        '''
        Returns parameters used to create a container
        '''
        # Maps Docker SDK keyword -> module option name.
        create_params = dict(
            command='command',
            domainname='domainname',
            hostname='hostname',
            user='user',
            detach='detach',
            stdin_open='interactive',
            tty='tty',
            ports='ports',
            environment='env',
            name='name',
            entrypoint='entrypoint',
            mac_address='mac_address',
            labels='labels',
            stop_signal='stop_signal',
            working_dir='working_dir',
            stop_timeout='stop_timeout',
            healthcheck='healthcheck',
        )

        if self.client.docker_py_version < LooseVersion('3.0'):
            # cpu_shares and volume_driver moved to create_host_config in > 3
            create_params['cpu_shares'] = 'cpu_shares'
            create_params['volume_driver'] = 'volume_driver'

        result = dict(
            host_config=self._host_config(),
            volumes=self._get_mounts(),
        )

        for key, value in create_params.items():
            if getattr(self, value, None) is not None:
                if self.client.option_minimal_versions[value]['supported']:
                    result[key] = getattr(self, value)

        if self.networks_cli_compatible and self.networks:
            # Attach the container to the FIRST requested network at create
            # time (CLI-compatible behavior); remaining networks are handled
            # elsewhere after creation.
            network = self.networks[0]
            params = dict()
            for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
                if network.get(para):
                    params[para] = network[para]
            network_config = dict()
            network_config[network['name']] = self.client.create_endpoint_config(**params)
            result['networking_config'] = self.client.create_networking_config(network_config)
        return result

    def _expand_host_paths(self):
        # Expand relative ('.') and home ('~') host paths in volume specs to
        # absolute paths, normalizing 2-part specs to 3-part host:container:rw.
        new_vols = []
        for vol in self.volumes:
            if ':' in vol:
                if len(vol.split(':')) == 3:
                    host, container, mode = vol.split(':')
                    if not is_volume_permissions(mode):
                        self.fail('Found invalid volumes mode: {0}'.format(mode))
                    if re.match(r'[.~]', host):
                        host = os.path.abspath(os.path.expanduser(host))
                    new_vols.append("%s:%s:%s" % (host, container, mode))
                    continue
                elif len(vol.split(':')) == 2:
                    parts = vol.split(':')
                    # If the second part is a mode (e.g. "ro"), this is a
                    # named/anonymous volume, not a host bind; leave it alone.
                    if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
                        host = os.path.abspath(os.path.expanduser(parts[0]))
                        new_vols.append("%s:%s:rw" % (host, parts[1]))
                        continue
            new_vols.append(vol)
        return new_vols

    def _get_mounts(self):
        '''
        Return a list of container mounts.
        :return:
        '''
        # Extract only the container-side path from each volume spec.
        result = []
        if self.volumes:
            for vol in self.volumes:
                if ':' in vol:
                    if len(vol.split(':')) == 3:
                        dummy, container, dummy = vol.split(':')
                        result.append(container)
                        continue
                    if len(vol.split(':')) == 2:
                        parts = vol.split(':')
                        if not is_volume_permissions(parts[1]):
                            result.append(parts[1])
                            continue
                result.append(vol)
        self.log("mounts:")
        self.log(result, pretty_print=True)
        return result

    def _host_config(self):
        '''
        Returns parameters used to create a HostConfig object
        '''
        # Maps Docker SDK create_host_config keyword -> module option name.
        host_config_params = dict(
            port_bindings='published_ports',
            publish_all_ports='publish_all_ports',
            links='links',
            privileged='privileged',
            dns='dns_servers',
            dns_opt='dns_opts',
            dns_search='dns_search_domains',
            binds='volume_binds',
            volumes_from='volumes_from',
            network_mode='network_mode',
            userns_mode='userns_mode',
            cap_add='capabilities',
            cap_drop='cap_drop',
            extra_hosts='etc_hosts',
            read_only='read_only',
            ipc_mode='ipc_mode',
            security_opt='security_opts',
            ulimits='ulimits',
            sysctls='sysctls',
            log_config='log_config',
            mem_limit='memory',
            memswap_limit='memory_swap',
            mem_swappiness='memory_swappiness',
            oom_score_adj='oom_score_adj',
            oom_kill_disable='oom_killer',
            shm_size='shm_size',
            group_add='groups',
            devices='devices',
            pid_mode='pid_mode',
            tmpfs='tmpfs',
            init='init',
            uts_mode='uts',
            runtime='runtime',
            auto_remove='auto_remove',
            device_read_bps='device_read_bps',
            device_write_bps='device_write_bps',
            device_read_iops='device_read_iops',
            device_write_iops='device_write_iops',
            pids_limit='pids_limit',
            mounts='mounts',
        )

        if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
            # blkio_weight can always be updated, but can only be set on creation
            # when Docker SDK for Python and Docker API are new enough
            host_config_params['blkio_weight'] = 'blkio_weight'

        if self.client.docker_py_version >= LooseVersion('3.0'):
            # cpu_shares and volume_driver moved to create_host_config in > 3
            host_config_params['cpu_shares'] = 'cpu_shares'
            host_config_params['volume_driver'] = 'volume_driver'

        params = dict()
        for key, value in host_config_params.items():
            if getattr(self, value, None) is not None:
                # Only pass options supported by the installed SDK/API version.
                if self.client.option_minimal_versions[value]['supported']:
                    params[key] = getattr(self, value)

        if self.restart_policy:
            params['restart_policy'] = dict(Name=self.restart_policy,
                                            MaximumRetryCount=self.restart_retries)

        if 'mounts' in params:
            # Use the pre-built docker_types.Mount objects, not the raw dicts.
            params['mounts'] = self.mounts_opt

        return self.client.create_host_config(**params)

    @property
    def default_host_ip(self):
        # Default bind address for published ports; a bridge network with a
        # custom host_binding_ipv4 option overrides 0.0.0.0.
        ip = '0.0.0.0'
        if not self.networks:
            return ip
        for net in self.networks:
            if net.get('name'):
                try:
                    network = self.client.inspect_network(net['name'])
                    if network.get('Driver') == 'bridge' and \
                       network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
                        ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
                        break
                except NotFound as nfe:
                    self.client.fail(
                        "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
                        exception=traceback.format_exc()
                    )
        return ip

    def _parse_publish_ports(self):
        '''
        Parse ports from docker CLI syntax
        '''
        if self.published_ports is None:
            return None

        if 'all' in self.published_ports:
            return 'all'

        default_ip = self.default_host_ip

        binds = {}
        for port in self.published_ports:
            # Spec forms: "cport", "hport:cport", "ip:hport:cport", "ip::cport",
            # each optionally suffixed with "/protocol".
            parts = split_colon_ipv6(str(port), self.client)
            container_port = parts[-1]
            protocol = ''
            if '/' in container_port:
                container_port, protocol = parts[-1].split('/')
            container_ports = parse_port_range(container_port, self.client)

            p_len = len(parts)
            if p_len == 1:
                port_binds = len(container_ports) * [(default_ip,)]
            elif p_len == 2:
                port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
            elif p_len == 3:
                # We only allow IPv4 and IPv6 addresses for the bind address
                ipaddr = parts[0]
                if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
                    self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
                               'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
                if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
                    # Strip the square brackets around a literal IPv6 address.
                    ipaddr = ipaddr[1:-1]
                if parts[1]:
                    port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
                else:
                    port_binds = len(container_ports) * [(ipaddr,)]

            for bind, container_port in zip(port_binds, container_ports):
                idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
                if idx in binds:
                    # Multiple host bindings for the same container port are
                    # collected into a list.
                    old_bind = binds[idx]
                    if isinstance(old_bind, list):
                        old_bind.append(bind)
                    else:
                        binds[idx] = [old_bind, bind]
                else:
                    binds[idx] = bind
        return binds

    def _get_volume_binds(self, volumes):
        '''
        Extract host bindings, if any, from list of volume mapping strings.

        :return: dictionary of bind mappings
        '''
        result = dict()
        if volumes:
            for vol in volumes:
                host = None
                if ':' in vol:
                    parts = vol.split(':')
                    if len(parts) == 3:
                        host, container, mode = parts
                        if not is_volume_permissions(mode):
                            self.fail('Found invalid volumes mode: {0}'.format(mode))
                    elif len(parts) == 2:
                        # Only treat as a host bind when the second part is a
                        # container path rather than a mode flag; default to rw.
                        if not is_volume_permissions(parts[1]):
                            host, container, mode = (vol.split(':') + ['rw'])
                if host is not None:
                    result[host] = dict(
                        bind=container,
                        mode=mode
                    )
        return result

    def _parse_exposed_ports(self, published_ports):
        '''
        Parse exposed ports from docker CLI-style ports syntax.
        '''
        exposed = []
        if self.exposed_ports:
            for port in self.exposed_ports:
                port = str(port).strip()
                protocol = 'tcp'
                match = re.search(r'(/.+$)', port)
                if match:
                    protocol = match.group(1).replace('/', '')
                    port = re.sub(r'/.+$', '', port)
                exposed.append((port, protocol))
        if published_ports:
            # Any published port should also be exposed
            for publish_port in published_ports:
                match = False
                if isinstance(publish_port, string_types) and '/' in publish_port:
                    port, protocol = publish_port.split('/')
                    port = int(port)
                else:
                    protocol = 'tcp'
                    port = int(publish_port)
                for exposed_port in exposed:
                    if exposed_port[1] != protocol:
                        continue
                    if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
                        start_port, end_port = exposed_port[0].split('-')
                        if int(start_port) <= port <= int(end_port):
                            match = True
                    elif exposed_port[0] == port:
                        # NOTE(review): exposed_port[0] originates as a str while
                        # port is an int here, so this comparison looks like it can
                        # miss matches — verify against callers before changing.
                        match = True
                if not match:
                    exposed.append((port, protocol))
        return exposed

    @staticmethod
    def _parse_links(links):
        '''
        Turn links into a dictionary
        '''
        if links is None:
            return None

        result = []
        for link in links:
            # "name:alias" -> (name, alias); bare "name" -> (name, name).
            parsed_link = link.split(':', 1)
            if len(parsed_link) == 2:
                result.append((parsed_link[0], parsed_link[1]))
            else:
                result.append((parsed_link[0], parsed_link[0]))
        return result

    def _parse_ulimits(self):
        '''
        Turn ulimits into an array of Ulimit objects
        '''
        if self.ulimits is None:
            return None

        results = []
        for limit in self.ulimits:
            # "name:soft" or "name:soft:hard"; soft doubles as hard when
            # no hard limit is given.
            limits = dict()
            pieces = limit.split(':')
            if len(pieces) >= 2:
                limits['name'] = pieces[0]
                limits['soft'] = int(pieces[1])
                limits['hard'] = int(pieces[1])
            if len(pieces) == 3:
                limits['hard'] = int(pieces[2])
            try:
                results.append(Ulimit(**limits))
            except ValueError as exc:
                self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
        return results

    def _parse_sysctls(self):
        '''
        Turn sysctls into a dict of sysctl settings (passed through unchanged).
        '''
        return self.sysctls

    def _parse_log_config(self):
        '''
        Create a LogConfig object
        '''
        if self.log_driver is None:
            return None

        options = dict(
            Type=self.log_driver,
            Config=dict()
        )

        if self.log_options is not None:
            options['Config'] = dict()
            for k, v in self.log_options.items():
                if not isinstance(v, string_types):
                    # The docker API expects string option values; coerce and warn.
                    self.client.module.warn(
                        "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
                        "If this is not correct, or you want to avoid such warnings, please quote the value." % (k, str(v))
                    )
                    v = str(v)
                    self.log_options[k] = v
                options['Config'][k] = v

        try:
            return LogConfig(**options)
        except ValueError as exc:
            self.fail('Error parsing logging options - %s' % (exc))

    def _parse_tmpfs(self):
        '''
        Turn tmpfs into a hash of Tmpfs objects
        '''
        result = dict()
        if self.tmpfs is None:
            return result

        for tmpfs_spec in self.tmpfs:
            # "path" or "path:options" -> {path: options-or-empty-string}.
            split_spec = tmpfs_spec.split(":", 1)
            if len(split_spec) > 1:
                result[split_spec[0]] = split_spec[1]
            else:
                result[split_spec[0]] = ""
        return result

    def _get_environment(self):
        """
        If environment file is combined with explicit environment variables, the explicit environment
        variables take precedence.
        """
        final_env = {}
        if self.env_file:
            parsed_env_file = utils.parse_env_file(self.env_file)
            for name, value in parsed_env_file.items():
                final_env[name] = str(value)
        if self.env:
            for name, value in self.env.items():
                if not isinstance(value, string_types):
                    self.fail("Non-string value found for env option. Ambiguous env options must be "
                              "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
                final_env[name] = str(value)
        return final_env

    def _get_network_id(self, network_name):
        # Resolve a network name to its ID; returns None when no exact match
        # is found (the SDK's names filter can return partial matches).
        network_id = None
        try:
            for network in self.client.networks(names=[network_name]):
                if network['Name'] == network_name:
                    network_id = network['Id']
                    break
        except Exception as exc:
            self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
        return network_id

    def _process_mounts(self):
        # Validate the mounts option and build two parallel lists: SDK Mount
        # objects for the API call, and plain dicts for later comparison.
        if self.mounts is None:
            return None, None
        mounts_list = []
        mounts_expected = []
        for mount in self.mounts:
            target = mount['target']
            datatype = mount['type']
            mount_dict = dict(mount)
            # Sanity checks (so we don't wait for docker-py to barf on input)
            if mount_dict.get('source') is None and datatype != 'tmpfs':
                self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
            # Each of these sub-options is only valid for one mount type.
            mount_option_types = dict(
                volume_driver='volume',
                volume_options='volume',
                propagation='bind',
                no_copy='volume',
                labels='volume',
                tmpfs_size='tmpfs',
                tmpfs_mode='tmpfs',
            )
            for option, req_datatype in mount_option_types.items():
                if mount_dict.get(option) is not None and datatype != req_datatype:
                    self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
            # Handle volume_driver and volume_options
            volume_driver = mount_dict.pop('volume_driver')
            volume_options = mount_dict.pop('volume_options')
            if volume_driver:
                if volume_options:
                    volume_options = clean_dict_booleans_for_docker_api(volume_options)
                mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
            if mount_dict['labels']:
                mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
            if mount_dict.get('tmpfs_size') is not None:
                try:
                    mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
                except ValueError as exc:
                    self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
            if mount_dict.get('tmpfs_mode') is not None:
                try:
                    # tmpfs_mode is given as an octal string (e.g. "1777").
                    mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
                except Exception as dummy:
                    self.client.fail('tmp_fs mode of mount "{0}" is not an octal string!'.format(target))
            # Fill expected mount dict
            mount_expected = dict(mount)
            mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
            mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
            # Add result to lists
            mounts_list.append(docker_types.Mount(**mount_dict))
            mounts_expected.append(omit_none_from_dict(mount_expected))
        return mounts_list, mounts_expected

    def _process_rate_bps(self, option):
        """
        Format device_read_bps and device_write_bps option
        """
        # Title-case the keys for the docker API (path -> Path, rate -> Rate)
        # and convert human-readable rates ("20M") to bytes.
        devices_list = []
        for v in getattr(self, option):
            device_dict = dict((x.title(), y) for x, y in v.items())
            device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
            devices_list.append(device_dict)

        setattr(self, option, devices_list)

    def _process_rate_iops(self, option):
        """
        Format device_read_iops and device_write_iops option
        """
        # Title-case the keys for the docker API; IOPS rates stay integers.
        devices_list = []
        for v in getattr(self, option):
            device_dict = dict((x.title(), y) for x, y in v.items())
            devices_list.append(device_dict)

        setattr(self, option, devices_list)

    def _replace_container_names(self, mode):
        """
        Parse IPC and PID modes. If they contain a container name, replace
        with the container's ID.
        """
        if mode is None or not mode.startswith('container:'):
            return mode
        container_name = mode[len('container:'):]
        # Try to inspect container to see whether this is an ID or a
        # name (and in the latter case, retrieve it's ID)
        container = self.client.get_container(container_name)
        if container is None:
            # If we can't find the container, issue a warning and continue with
            # what the user specified.
            self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
            return mode
        return 'container:{0}'.format(container['Id'])

    def _check_mount_target_collisions(self):
        # Fail when the same container path is targeted more than once across
        # the mounts and volumes options.
        last = dict()

        def f(t, name):
            if t in last:
                if name == last[t]:
                    self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
                else:
                    self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
            last[t] = name

        if self.expected_mounts:
            for t in [m['target'] for m in self.expected_mounts]:
                f(t, 'mounts')

        if self.volumes:
            for v in self.volumes:
                vs = v.split(':')
                # 1-part spec: the whole string is the container path;
                # otherwise the second component is.
                f(vs[0 if len(vs) == 1 else 1], 'volumes')


class Container(DockerBaseClass):
    # Wraps a docker inspect result and compares it against the requested
    # TaskParameters to decide whether the container needs changes.

    def __init__(self, container, parameters):
        super(Container, self).__init__()
        self.raw = container
        self.Id = None
        self.container = container
        if container:
            self.Id = container['Id']
            self.Image = container['Image']
        self.log(self.container, pretty_print=True)
        self.parameters = parameters
        # "expected_*" attributes are computed lazily by
        # has_different_configuration(); initialize them to None here.
        self.parameters.expected_links = None
        self.parameters.expected_ports = None
        self.parameters.expected_exposed = None
        self.parameters.expected_volumes = None
        self.parameters.expected_ulimits = None
        self.parameters.expected_sysctls = None
        self.parameters.expected_etc_hosts = None
        self.parameters.expected_env = None
        # Maps each computed "expected_*" attribute back to the module option
        # it was derived from (used for reporting differences).
        self.parameters_map = dict()
        self.parameters_map['expected_links'] = 'links'
        self.parameters_map['expected_ports'] = 'expected_ports'
        self.parameters_map['expected_exposed'] = 'exposed_ports'
        self.parameters_map['expected_volumes'] = 'volumes'
        self.parameters_map['expected_ulimits'] = 'ulimits'
        self.parameters_map['expected_sysctls'] = 'sysctls'
        self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
        self.parameters_map['expected_env'] = 'env'
        self.parameters_map['expected_entrypoint'] = 'entrypoint'
        self.parameters_map['expected_binds'] = 'volumes'
        self.parameters_map['expected_cmd'] = 'command'
        self.parameters_map['expected_devices'] = 'devices'
        self.parameters_map['expected_healthcheck'] = 'healthcheck'
        self.parameters_map['expected_mounts'] = 'mounts'

    def fail(self, msg):
        self.parameters.client.fail(msg)

    @property
    def exists(self):
        # True when an inspect result was supplied at construction time.
        return True if self.container else False

    @property
    def running(self):
        if self.container and self.container.get('State'):
            if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
                return True
        return False

    @property
    def paused(self):
        if self.container and self.container.get('State'):
            return self.container['State'].get('Paused', False)
        return False

    def _compare(self, a, b, compare):
        '''
        Compare values a and b as described in compare.
        '''
        return compare_generic(a, b, compare['comparison'], compare['type'])

    def _decode_mounts(self, mounts):
        # Convert docker inspect HostConfig.Mounts entries into the flat
        # module-option shape so they can be compared with expected_mounts.
        if not mounts:
            return mounts
        result = []
        empty_dict = dict()
        for mount in mounts:
            res = dict()
            res['type'] = mount.get('Type')
            res['source'] = mount.get('Source')
            res['target'] = mount.get('Target')
            res['read_only'] = mount.get('ReadOnly', False)  # golang's omitempty for bool returns None for False
            res['consistency'] = mount.get('Consistency')
            res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
            res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
            res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
            res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
            res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
            res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
            res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
            result.append(res)
        return result

    def has_different_configuration(self, image):
        '''
        Diff parameters vs existing container config.
Returns tuple: (True | False, List of differences) ''' self.log('Starting has_different_configuration') self.parameters.expected_entrypoint = self._get_expected_entrypoint() self.parameters.expected_links = self._get_expected_links() self.parameters.expected_ports = self._get_expected_ports() self.parameters.expected_exposed = self._get_expected_exposed(image) self.parameters.expected_volumes = self._get_expected_volumes(image) self.parameters.expected_binds = self._get_expected_binds(image) self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls) self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') self.parameters.expected_env = self._get_expected_env(image) self.parameters.expected_cmd = self._get_expected_cmd() self.parameters.expected_devices = self._get_expected_devices() self.parameters.expected_healthcheck = self._get_expected_healthcheck() if not self.container.get('HostConfig'): self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") if not self.container.get('Config'): self.fail("has_config_diff: Error parsing container properties. Config missing.") if not self.container.get('NetworkSettings'): self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.") host_config = self.container['HostConfig'] log_config = host_config.get('LogConfig', dict()) restart_policy = host_config.get('RestartPolicy', dict()) config = self.container['Config'] network = self.container['NetworkSettings'] # The previous version of the docker module ignored the detach state by # assuming if the container was running, it must have been detached. 
detach = not (config.get('AttachStderr') and config.get('AttachStdout')) # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 if config.get('ExposedPorts') is not None: expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()] else: expected_exposed = [] # Map parameters to container inspect results config_mapping = dict( expected_cmd=config.get('Cmd'), domainname=config.get('Domainname'), hostname=config.get('Hostname'), user=config.get('User'), detach=detach, init=host_config.get('Init'), interactive=config.get('OpenStdin'), capabilities=host_config.get('CapAdd'), cap_drop=host_config.get('CapDrop'), expected_devices=host_config.get('Devices'), dns_servers=host_config.get('Dns'), dns_opts=host_config.get('DnsOptions'), dns_search_domains=host_config.get('DnsSearch'), expected_env=(config.get('Env') or []), expected_entrypoint=config.get('Entrypoint'), expected_etc_hosts=host_config['ExtraHosts'], expected_exposed=expected_exposed, groups=host_config.get('GroupAdd'), ipc_mode=host_config.get("IpcMode"), labels=config.get('Labels'), expected_links=host_config.get('Links'), mac_address=network.get('MacAddress'), memory_swappiness=host_config.get('MemorySwappiness'), network_mode=host_config.get('NetworkMode'), userns_mode=host_config.get('UsernsMode'), oom_killer=host_config.get('OomKillDisable'), oom_score_adj=host_config.get('OomScoreAdj'), pid_mode=host_config.get('PidMode'), privileged=host_config.get('Privileged'), expected_ports=host_config.get('PortBindings'), read_only=host_config.get('ReadonlyRootfs'), restart_policy=restart_policy.get('Name'), runtime=host_config.get('Runtime'), shm_size=host_config.get('ShmSize'), security_opts=host_config.get("SecurityOpt"), stop_signal=config.get("StopSignal"), tmpfs=host_config.get('Tmpfs'), tty=config.get('Tty'), expected_ulimits=host_config.get('Ulimits'), expected_sysctls=host_config.get('Sysctls'), uts=host_config.get('UTSMode'), 
expected_volumes=config.get('Volumes'), expected_binds=host_config.get('Binds'), volume_driver=host_config.get('VolumeDriver'), volumes_from=host_config.get('VolumesFrom'), working_dir=config.get('WorkingDir'), publish_all_ports=host_config.get('PublishAllPorts'), expected_healthcheck=config.get('Healthcheck'), disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']), device_read_bps=host_config.get('BlkioDeviceReadBps'), device_write_bps=host_config.get('BlkioDeviceWriteBps'), device_read_iops=host_config.get('BlkioDeviceReadIOps'), device_write_iops=host_config.get('BlkioDeviceWriteIOps'), pids_limit=host_config.get('PidsLimit'), # According to https://github.com/moby/moby/, support for HostConfig.Mounts # has been included at least since v17.03.0-ce, which has API version 1.26. # The previous tag, v1.9.1, has API version 1.21 and does not have # HostConfig.Mounts. I have no idea what about API 1.25... expected_mounts=self._decode_mounts(host_config.get('Mounts')), ) # Options which don't make sense without their accompanying option if self.parameters.restart_policy: config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount') if self.parameters.log_driver: config_mapping['log_driver'] = log_config.get('Type') config_mapping['log_options'] = log_config.get('Config') if self.parameters.client.option_minimal_versions['auto_remove']['supported']: # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately # it has a default value, that's why we have to jump through the hoops here config_mapping['auto_remove'] = host_config.get('AutoRemove') if self.parameters.client.option_minimal_versions['stop_timeout']['supported']: # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that # stop_timeout has a hybrid role, in that it used to be something only used # for stopping containers, and is now also used as a container property. # That's why it needs special handling here. 
config_mapping['stop_timeout'] = config.get('StopTimeout') if self.parameters.client.docker_api_version < LooseVersion('1.22'): # For docker API < 1.22, update_container() is not supported. Thus # we need to handle all limits which are usually handled by # update_container() as configuration changes which require a container # restart. config_mapping.update(dict( blkio_weight=host_config.get('BlkioWeight'), cpu_period=host_config.get('CpuPeriod'), cpu_quota=host_config.get('CpuQuota'), cpu_shares=host_config.get('CpuShares'), cpuset_cpus=host_config.get('CpusetCpus'), cpuset_mems=host_config.get('CpusetMems'), kernel_memory=host_config.get("KernelMemory"), memory=host_config.get('Memory'), memory_reservation=host_config.get('MemoryReservation'), memory_swap=host_config.get('MemorySwap'), )) differences = DifferenceTracker() for key, value in config_mapping.items(): minimal_version = self.parameters.client.option_minimal_versions.get(key, {}) if not minimal_version.get('supported', True): continue compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), str(value), compare)) if getattr(self.parameters, key, None) is not None: match = self._compare(getattr(self.parameters, key), value, compare) if not match: # no match. record the differences p = getattr(self.parameters, key) c = value if compare['type'] == 'set': # Since the order does not matter, sort so that the diff output is better. if p is not None: p = sorted(p) if c is not None: c = sorted(c) elif compare['type'] == 'set(dict)': # Since the order does not matter, sort so that the diff output is better. if key == 'expected_mounts': # For selected values, use one entry as key def sort_key_fn(x): return x['target'] else: # We sort the list of dictionaries by using the sorted items of a dict as its key. 
def sort_key_fn(x): return sorted((a, str(b)) for a, b in x.items()) if p is not None: p = sorted(p, key=sort_key_fn) if c is not None: c = sorted(c, key=sort_key_fn) differences.add(key, parameter=p, active=c) has_differences = not differences.empty return has_differences, differences def has_different_resource_limits(self): ''' Diff parameters and container resource limits ''' if not self.container.get('HostConfig'): self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.") if self.parameters.client.docker_api_version < LooseVersion('1.22'): # update_container() call not supported return False, [] host_config = self.container['HostConfig'] config_mapping = dict( blkio_weight=host_config.get('BlkioWeight'), cpu_period=host_config.get('CpuPeriod'), cpu_quota=host_config.get('CpuQuota'), cpu_shares=host_config.get('CpuShares'), cpuset_cpus=host_config.get('CpusetCpus'), cpuset_mems=host_config.get('CpusetMems'), kernel_memory=host_config.get("KernelMemory"), memory=host_config.get('Memory'), memory_reservation=host_config.get('MemoryReservation'), memory_swap=host_config.get('MemorySwap'), ) differences = DifferenceTracker() for key, value in config_mapping.items(): if getattr(self.parameters, key, None): compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] match = self._compare(getattr(self.parameters, key), value, compare) if not match: # no match. record the differences differences.add(key, parameter=getattr(self.parameters, key), active=value) different = not differences.empty return different, differences def has_network_differences(self): ''' Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 ''' different = False differences = [] if not self.parameters.networks: return different, differences if not self.container.get('NetworkSettings'): self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") connected_networks = self.container['NetworkSettings']['Networks'] for network in self.parameters.networks: if connected_networks.get(network['name'], None) is None: different = True differences.append(dict( parameter=network, container=None )) else: diff = False if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'): diff = True if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'): diff = True if network.get('aliases'): if not compare_generic(network['aliases'], connected_networks[network['name']].get('Aliases'), 'allow_more_present', 'set'): diff = True if network.get('links'): expected_links = [] for link, alias in network['links']: expected_links.append("%s:%s" % (link, alias)) if not compare_generic(expected_links, connected_networks[network['name']].get('Links'), 'allow_more_present', 'set'): diff = True if diff: different = True differences.append(dict( parameter=network, container=dict( name=network['name'], ipv4_address=connected_networks[network['name']].get('IPAddress'), ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'), aliases=connected_networks[network['name']].get('Aliases'), links=connected_networks[network['name']].get('Links') ) )) return different, differences def has_extra_networks(self): ''' Check if the container is connected to non-requested networks ''' extra_networks = [] extra = False if not self.container.get('NetworkSettings'): self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") connected_networks = self.container['NetworkSettings'].get('Networks') if connected_networks: for network, network_config in connected_networks.items(): keep = False if self.parameters.networks: for expected_network in self.parameters.networks: if expected_network['name'] == network: keep = True if not keep: extra = True extra_networks.append(dict(name=network, id=network_config['NetworkID'])) return extra, extra_networks def _get_expected_devices(self): if not self.parameters.devices: return None expected_devices = [] for device in self.parameters.devices: parts = device.split(':') if len(parts) == 1: expected_devices.append( dict( CgroupPermissions='rwm', PathInContainer=parts[0], PathOnHost=parts[0] )) elif len(parts) == 2: parts = device.split(':') expected_devices.append( dict( CgroupPermissions='rwm', PathInContainer=parts[1], PathOnHost=parts[0] ) ) else: expected_devices.append( dict( CgroupPermissions=parts[2], PathInContainer=parts[1], PathOnHost=parts[0] )) return expected_devices def _get_expected_entrypoint(self): if not self.parameters.entrypoint: return None return shlex.split(self.parameters.entrypoint) def _get_expected_ports(self): if not self.parameters.published_ports: return None expected_bound_ports = {} for container_port, config in self.parameters.published_ports.items(): if isinstance(container_port, int): container_port = "%s/tcp" % container_port if len(config) == 1: if isinstance(config[0], int): expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] else: expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] elif isinstance(config[0], tuple): expected_bound_ports[container_port] = [] for host_ip, host_port in config: expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)}) else: expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] return expected_bound_ports def 
_get_expected_links(self): if self.parameters.links is None: return None self.log('parameter links:') self.log(self.parameters.links, pretty_print=True) exp_links = [] for link, alias in self.parameters.links: exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) return exp_links def _get_expected_binds(self, image): self.log('_get_expected_binds') image_vols = [] if image: image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes')) param_vols = [] if self.parameters.volumes: for vol in self.parameters.volumes: host = None if ':' in vol: if len(vol.split(':')) == 3: host, container, mode = vol.split(':') if not is_volume_permissions(mode): self.fail('Found invalid volumes mode: {0}'.format(mode)) if len(vol.split(':')) == 2: parts = vol.split(':') if not is_volume_permissions(parts[1]): host, container, mode = vol.split(':') + ['rw'] if host: param_vols.append("%s:%s:%s" % (host, container, mode)) result = list(set(image_vols + param_vols)) self.log("expected_binds:") self.log(result, pretty_print=True) return result def _get_image_binds(self, volumes): ''' Convert array of binds to array of strings with format host_path:container_path:mode :param volumes: array of bind dicts :return: array of strings ''' results = [] if isinstance(volumes, dict): results += self._get_bind_from_dict(volumes) elif isinstance(volumes, list): for vol in volumes: results += self._get_bind_from_dict(vol) return results @staticmethod def _get_bind_from_dict(volume_dict): results = [] if volume_dict: for host_path, config in volume_dict.items(): if isinstance(config, dict) and config.get('bind'): container_path = config.get('bind') mode = config.get('mode', 'rw') results.append("%s:%s:%s" % (host_path, container_path, mode)) return results def _get_expected_volumes(self, image): self.log('_get_expected_volumes') expected_vols = dict() if image and image[self.parameters.client.image_inspect_source].get('Volumes'): 
expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes')) if self.parameters.volumes: for vol in self.parameters.volumes: container = None if ':' in vol: if len(vol.split(':')) == 3: dummy, container, mode = vol.split(':') if not is_volume_permissions(mode): self.fail('Found invalid volumes mode: {0}'.format(mode)) if len(vol.split(':')) == 2: parts = vol.split(':') if not is_volume_permissions(parts[1]): dummy, container, mode = vol.split(':') + ['rw'] new_vol = dict() if container: new_vol[container] = dict() else: new_vol[vol] = dict() expected_vols.update(new_vol) if not expected_vols: expected_vols = None self.log("expected_volumes:") self.log(expected_vols, pretty_print=True) return expected_vols def _get_expected_env(self, image): self.log('_get_expected_env') expected_env = dict() if image and image[self.parameters.client.image_inspect_source].get('Env'): for env_var in image[self.parameters.client.image_inspect_source]['Env']: parts = env_var.split('=', 1) expected_env[parts[0]] = parts[1] if self.parameters.env: expected_env.update(self.parameters.env) param_env = [] for key, value in expected_env.items(): param_env.append("%s=%s" % (key, value)) return param_env def _get_expected_exposed(self, image): self.log('_get_expected_exposed') image_ports = [] if image: image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {} image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()] param_ports = [] if self.parameters.ports: param_ports = [str(p[0]) + '/' + p[1] for p in self.parameters.ports] result = list(set(image_ports + param_ports)) self.log(result, pretty_print=True) return result def _get_expected_ulimits(self, config_ulimits): self.log('_get_expected_ulimits') if config_ulimits is None: return None results = [] for limit in config_ulimits: results.append(dict( Name=limit.name, Soft=limit.soft, Hard=limit.hard )) return results def _get_expected_sysctls(self, 
config_sysctls): self.log('_get_expected_sysctls') if config_sysctls is None: return None result = dict() for key, value in config_sysctls.items(): result[key] = str(value) return result def _get_expected_cmd(self): self.log('_get_expected_cmd') if not self.parameters.command: return None return shlex.split(self.parameters.command) def _convert_simple_dict_to_list(self, param_name, join_with=':'): if getattr(self.parameters, param_name, None) is None: return None results = [] for key, value in getattr(self.parameters, param_name).items(): results.append("%s%s%s" % (key, join_with, value)) return results def _normalize_port(self, port): if '/' not in port: return port + '/tcp' return port def _get_expected_healthcheck(self): self.log('_get_expected_healthcheck') expected_healthcheck = dict() if self.parameters.healthcheck: expected_healthcheck.update([(k.title().replace("_", ""), v) for k, v in self.parameters.healthcheck.items()]) return expected_healthcheck class ContainerManager(DockerBaseClass): ''' Perform container management tasks ''' def __init__(self, client): super(ContainerManager, self).__init__() if client.module.params.get('log_options') and not client.module.params.get('log_driver'): client.module.warn('log_options is ignored when log_driver is not specified') if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'): client.module.warn('healthcheck is ignored when test is not specified') if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'): client.module.warn('restart_retries is ignored when restart_policy is not specified') self.client = client self.parameters = TaskParameters(client) self.check_mode = self.client.check_mode self.results = {'changed': False, 'actions': []} self.diff = {} self.diff_tracker = DifferenceTracker() self.facts = {} state = self.parameters.state if state in ('stopped', 'started', 'present'): self.present(state) elif state 
== 'absent': self.absent() if not self.check_mode and not self.parameters.debug: self.results.pop('actions') if self.client.module._diff or self.parameters.debug: self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() self.results['diff'] = self.diff if self.facts: self.results['ansible_facts'] = {'docker_container': self.facts} self.results['container'] = self.facts def present(self, state): container = self._get_container(self.parameters.name) was_running = container.running was_paused = container.paused container_created = False # If the image parameter was passed then we need to deal with the image # version comparison. Otherwise we handle this depending on whether # the container already runs or not; in the former case, in case the # container needs to be restarted, we use the existing container's # image ID. image = self._get_image() self.log(image, pretty_print=True) if not container.exists: # New container self.log('No container found') if not self.parameters.image: self.fail('Cannot create container when image is not specified!') self.diff_tracker.add('exists', parameter=True, active=False) new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) if new_container: container = new_container container_created = True else: # Existing container different, differences = container.has_different_configuration(image) image_different = False if self.parameters.comparisons['image']['comparison'] == 'strict': image_different = self._image_is_different(image, container) if image_different or different or self.parameters.recreate: self.diff_tracker.merge(differences) self.diff['differences'] = differences.get_legacy_docker_container_diffs() if image_different: self.diff['image_different'] = True self.log("differences") self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) image_to_use = self.parameters.image if not image_to_use and container and container.Image: image_to_use = 
container.Image if not image_to_use: self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') if container.running: self.container_stop(container.Id) self.container_remove(container.Id) new_container = self.container_create(image_to_use, self.parameters.create_parameters) if new_container: container = new_container container_created = True if container and container.exists: container = self.update_limits(container) container = self.update_networks(container, container_created) if state == 'started' and not container.running: self.diff_tracker.add('running', parameter=True, active=was_running) container = self.container_start(container.Id) elif state == 'started' and self.parameters.restart: self.diff_tracker.add('running', parameter=True, active=was_running) self.diff_tracker.add('restarted', parameter=True, active=False) container = self.container_restart(container.Id) elif state == 'stopped' and container.running: self.diff_tracker.add('running', parameter=False, active=was_running) self.container_stop(container.Id) container = self._get_container(container.Id) if state == 'started' and container.paused != self.parameters.paused: self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused) if not self.check_mode: try: if self.parameters.paused: self.client.pause(container=container.Id) else: self.client.unpause(container=container.Id) except Exception as exc: self.fail("Error %s container %s: %s" % ( "pausing" if self.parameters.paused else "unpausing", container.Id, str(exc) )) container = self._get_container(container.Id) self.results['changed'] = True self.results['actions'].append(dict(set_paused=self.parameters.paused)) self.facts = container.raw def absent(self): container = self._get_container(self.parameters.name) if container.exists: if container.running: self.diff_tracker.add('running', parameter=False, active=True) self.container_stop(container.Id) 
            self.diff_tracker.add('exists', parameter=False, active=True)
            self.container_remove(container.Id)

    def fail(self, msg, **kwargs):
        # Delegate failure reporting to the Docker client wrapper (exits the module).
        self.client.fail(msg, **kwargs)

    def _output_logs(self, msg):
        # Forward captured container output to the Ansible module log.
        self.client.module.log(msg=msg)

    def _get_container(self, container):
        '''
        Expects container ID or Name. Returns a container object
        '''
        return Container(self.client.get_container(container), self.parameters)

    def _get_image(self):
        # Resolve the configured image to an inspect result, pulling it if needed.
        # Returns None when no image parameter was given.
        if not self.parameters.image:
            self.log('No image specified')
            return None
        if is_image_name_id(self.parameters.image):
            # Image given as an ID/digest: look it up directly, never pull.
            image = self.client.find_image_by_id(self.parameters.image)
        else:
            repository, tag = utils.parse_repository_tag(self.parameters.image)
            if not tag:
                tag = "latest"
            image = self.client.find_image(repository, tag)
            if not self.check_mode:
                # Pull when the image is missing locally or pull was requested;
                # only report 'changed' if the pull actually fetched a newer image.
                if not image or self.parameters.pull:
                    self.log("Pull the image.")
                    image, alreadyToLatest = self.client.pull_image(repository, tag)
                    if alreadyToLatest:
                        self.results['changed'] = False
                    else:
                        self.results['changed'] = True
                        self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
        self.log("image")
        self.log(image, pretty_print=True)
        return image

    def _image_is_different(self, image, container):
        # Compare the resolved image ID against the ID the container was created
        # from; records the difference in the diff tracker when they differ.
        if image and image.get('Id'):
            if container and container.Image:
                if image.get('Id') != container.Image:
                    self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
                    return True
        return False

    def update_limits(self, container):
        # Apply resource-limit changes in place via update_container() instead of
        # recreating the container. Returns a freshly inspected container object
        # when an update was issued, otherwise the one passed in.
        limits_differ, different_limits = container.has_different_resource_limits()
        if limits_differ:
            self.log("limit differences:")
            self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
            self.diff_tracker.merge(different_limits)
        if limits_differ and not self.check_mode:
            self.container_update(container.Id, self.parameters.update_parameters)
            return self._get_container(container.Id)
        return container

    def update_networks(self, container, container_created):
        # Reconcile the container's network attachments with the requested ones:
        # connect/reconnect networks that differ, and (in 'strict' mode or with
        # purge_networks) disconnect networks that were not requested.
        updated_container = container
        if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
            has_network_differences, network_differences = container.has_network_differences()
            if has_network_differences:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(network_differences=network_differences))
                else:
                    self.diff['differences'] = [dict(network_differences=network_differences)]
                for netdiff in network_differences:
                    self.diff_tracker.add(
                        'network.{0}'.format(netdiff['parameter']['name']),
                        parameter=netdiff['parameter'],
                        active=netdiff['container']
                    )
                self.results['changed'] = True
                updated_container = self._add_networks(container, network_differences)
        # purge_networks is the legacy switch; 'strict' comparison with an explicit
        # networks list has the same effect.
        if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
            has_extra_networks, extra_networks = container.has_extra_networks()
            if has_extra_networks:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(purge_networks=extra_networks))
                else:
                    self.diff['differences'] = [dict(purge_networks=extra_networks)]
                for extra_network in extra_networks:
                    self.diff_tracker.add(
                        'network.{0}'.format(extra_network['name']),
                        active=extra_network
                    )
                self.results['changed'] = True
                updated_container = self._purge_networks(container, extra_networks)
        return updated_container

    def _add_networks(self, container, differences):
        # For each differing network: disconnect first if already connected with
        # wrong options, then reconnect with the requested parameters.
        for diff in differences:
            # remove the container from the network, if connected
            if diff.get('container'):
                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
                if not self.check_mode:
                    try:
                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
                    except Exception as exc:
                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], str(exc)))
            # connect to the network
            params = dict()
            for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
                if diff['parameter'].get(para):
                    params[para] = diff['parameter'][para]
            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
            if not self.check_mode:
                try:
                    self.log("Connecting container to network %s" % diff['parameter']['id'])
                    self.log(params, pretty_print=True)
                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
                except Exception as exc:
                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
        return self._get_container(container.Id)

    def _purge_networks(self, container, networks):
        # Disconnect the container from every network in `networks` (those not
        # requested by the task) and return the re-inspected container.
        for network in networks:
            self.results['actions'].append(dict(removed_from_network=network['name']))
            if not self.check_mode:
                try:
                    self.client.disconnect_container_from_network(container.Id, network['name'])
                except Exception as exc:
                    self.fail("Error disconnecting container from network %s - %s" % (network['name'], str(exc)))
        return self._get_container(container.Id)

    def container_create(self, image, create_parameters):
        # Create (but do not start) a new container from `image`. In check mode
        # only the intended action is recorded and None is returned.
        self.log("create container")
        self.log("image: %s parameters:" % image)
        self.log(create_parameters, pretty_print=True)
        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
        self.results['changed'] = True
        new_container = None
        if not self.check_mode:
            try:
                new_container = self.client.create_container(image, **create_parameters)
                self.client.report_warnings(new_container)
            except Exception as exc:
                self.fail("Error creating container: %s" % str(exc))
            return self._get_container(new_container['Id'])
        return new_container

    def container_start(self, container_id):
        # Start the container. When detach is disabled, additionally block until
        # the container exits, collect its output (where the log driver allows),
        # fail on a non-zero exit status, and optionally clean up the container.
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.client.start(container=container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, str(exc)))
            if not self.parameters.detach:
                # wait() return shape changed in Docker SDK for Python 3.0.
                if self.client.docker_py_version >= LooseVersion('3.0'):
                    status = self.client.wait(container_id)['StatusCode']
                else:
                    status = self.client.wait(container_id)
                if self.parameters.auto_remove:
                    # The daemon may already have removed the container, so its
                    # logs cannot be fetched reliably.
                    output = "Cannot retrieve result as auto_remove is enabled"
                    if self.parameters.output_logs:
                        self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
                else:
                    config = self.client.inspect_container(container_id)
                    logging_driver = config['HostConfig']['LogConfig']['Type']
                    # Only these drivers support reading logs back through the API.
                    if logging_driver in ('json-file', 'journald'):
                        output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
                        if self.parameters.output_logs:
                            self._output_logs(msg=output)
                    else:
                        output = "Result logged using `%s` driver" % logging_driver
                if status != 0:
                    self.fail(output, status=status)
                if self.parameters.cleanup:
                    self.container_remove(container_id, force=True)
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                return insp
        return self._get_container(container_id)

    def container_remove(self, container_id, link=False, force=False):
        # Remove the container, deleting anonymous volumes unless keep_volumes
        # is set. Retries (bounded) after unpausing when the daemon refuses to
        # remove a paused container; ignores "not found" and "already in
        # progress" races.
        volume_state = (not self.parameters.keep_volumes)
        self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            count = 0
            while True:
                try:
                    response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
                except NotFound as dummy:
                    pass
                except APIError as exc:
                    if 'Unpause the container before stopping or killing' in exc.explanation:
                        # New docker daemon versions do not allow containers to be removed
                        # if they are paused. Make sure we don't end up in an infinite loop.
if count == 3: self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc))) count += 1 # Unpause try: self.client.unpause(container=container_id) except Exception as exc2: self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2))) # Now try again continue if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: pass else: self.fail("Error removing container %s: %s" % (container_id, str(exc))) except Exception as exc: self.fail("Error removing container %s: %s" % (container_id, str(exc))) # We only loop when explicitly requested by 'continue' break return response def container_update(self, container_id, update_parameters): if update_parameters: self.log("update container %s" % (container_id)) self.log(update_parameters, pretty_print=True) self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) self.results['changed'] = True if not self.check_mode and callable(getattr(self.client, 'update_container')): try: result = self.client.update_container(container_id, **update_parameters) self.client.report_warnings(result) except Exception as exc: self.fail("Error updating container %s: %s" % (container_id, str(exc))) return self._get_container(container_id) def container_kill(self, container_id): self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal)) self.results['changed'] = True response = None if not self.check_mode: try: if self.parameters.kill_signal: response = self.client.kill(container_id, signal=self.parameters.kill_signal) else: response = self.client.kill(container_id) except Exception as exc: self.fail("Error killing container %s: %s" % (container_id, exc)) return response def container_restart(self, container_id): self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout)) self.results['changed'] = True if not self.check_mode: try: if 
self.parameters.stop_timeout: dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout) else: dummy = self.client.restart(container_id) except Exception as exc: self.fail("Error restarting container %s: %s" % (container_id, str(exc))) return self._get_container(container_id) def container_stop(self, container_id): if self.parameters.force_kill: self.container_kill(container_id) return self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) self.results['changed'] = True response = None if not self.check_mode: count = 0 while True: try: if self.parameters.stop_timeout: response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) else: response = self.client.stop(container_id) except APIError as exc: if 'Unpause the container before stopping or killing' in exc.explanation: # New docker daemon versions do not allow containers to be removed # if they are paused. Make sure we don't end up in an infinite loop. if count == 3: self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc))) count += 1 # Unpause try: self.client.unpause(container=container_id) except Exception as exc2: self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2))) # Now try again continue self.fail("Error stopping container %s: %s" % (container_id, str(exc))) except Exception as exc: self.fail("Error stopping container %s: %s" % (container_id, str(exc))) # We only loop when explicitly requested by 'continue' break return response def detect_ipvX_address_usage(client): ''' Helper function to detect whether any specified network uses ipv4_address or ipv6_address ''' for network in client.module.params.get("networks") or []: if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None: return True return False class AnsibleDockerClientContainer(AnsibleDockerClient): # A list of module options which are not docker container 
properties __NON_CONTAINER_PROPERTY_OPTIONS = tuple([ 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks', 'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal', 'output_logs', 'paused' ] + list(DOCKER_COMMON_ARGS.keys())) def _parse_comparisons(self): comparisons = {} comp_aliases = {} # Put in defaults explicit_types = dict( command='list', devices='set(dict)', dns_search_domains='list', dns_servers='list', env='set', entrypoint='list', etc_hosts='set', mounts='set(dict)', networks='set(dict)', ulimits='set(dict)', device_read_bps='set(dict)', device_write_bps='set(dict)', device_read_iops='set(dict)', device_write_iops='set(dict)', ) all_options = set() # this is for improving user feedback when a wrong option was specified for comparison default_values = dict( stop_timeout='ignore', ) for option, data in self.module.argument_spec.items(): all_options.add(option) for alias in data.get('aliases', []): all_options.add(alias) # Ignore options which aren't used as container properties if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks': continue # Determine option type if option in explicit_types: datatype = explicit_types[option] elif data['type'] == 'list': datatype = 'set' elif data['type'] == 'dict': datatype = 'dict' else: datatype = 'value' # Determine comparison type if option in default_values: comparison = default_values[option] elif datatype in ('list', 'value'): comparison = 'strict' else: comparison = 'allow_more_present' comparisons[option] = dict(type=datatype, comparison=comparison, name=option) # Keep track of aliases comp_aliases[option] = option for alias in data.get('aliases', []): comp_aliases[alias] = option # Process legacy ignore options if self.module.params['ignore_image']: comparisons['image']['comparison'] = 'ignore' if self.module.params['purge_networks']: comparisons['networks']['comparison'] = 'strict' # Process options if 
self.module.params.get('comparisons'): # If '*' appears in comparisons, process it first if '*' in self.module.params['comparisons']: value = self.module.params['comparisons']['*'] if value not in ('strict', 'ignore'): self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") for option, v in comparisons.items(): if option == 'networks': # `networks` is special: only update if # some value is actually specified if self.module.params['networks'] is None: continue v['comparison'] = value # Now process all other comparisons. comp_aliases_used = {} for key, value in self.module.params['comparisons'].items(): if key == '*': continue # Find main key key_main = comp_aliases.get(key) if key_main is None: if key_main in all_options: self.fail("The module option '%s' cannot be specified in the comparisons dict, " "since it does not correspond to container's state!" % key) self.fail("Unknown module option '%s' in comparisons dict!" % key) if key_main in comp_aliases_used: self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) comp_aliases_used[key_main] = key # Check value and update accordingly if value in ('strict', 'ignore'): comparisons[key_main]['comparison'] = value elif value == 'allow_more_present': if comparisons[key_main]['type'] == 'value': self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) comparisons[key_main]['comparison'] = value else: self.fail("Unknown comparison mode '%s'!" 
% value) # Add implicit options comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports') comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports') comparisons['disable_healthcheck'] = dict(type='value', comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict', name='disable_healthcheck') # Check legacy values if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore': self.module.warn('The ignore_image option has been overridden by the comparisons option!') if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict': self.module.warn('The purge_networks option has been overridden by the comparisons option!') self.comparisons = comparisons def _get_additional_minimal_versions(self): stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25') stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent' if stop_timeout_supported: stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1') if stop_timeout_needed_for_update and not stop_timeout_supported: # We warn (instead of fail) since in older versions, stop_timeout was not used # to update the container's configuration, but only when stopping a container. self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update " "the container's stop_timeout configuration. " "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,)) else: if stop_timeout_needed_for_update and not stop_timeout_supported: # We warn (instead of fail) since in older versions, stop_timeout was not used # to update the container's configuration, but only when stopping a container. self.module.warn("Docker API version is %s. 
Minimum version required is 1.25 to set or " "update the container's stop_timeout configuration." % (self.docker_api_version_str,)) self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported def __init__(self, **kwargs): option_minimal_versions = dict( # internal options log_config=dict(), publish_all_ports=dict(), ports=dict(), volume_binds=dict(), name=dict(), # normal options device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'), device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'), device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'), device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'), dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'), ipc_mode=dict(docker_api_version='1.25'), mac_address=dict(docker_api_version='1.25'), oom_score_adj=dict(docker_api_version='1.22'), shm_size=dict(docker_api_version='1.22'), stop_signal=dict(docker_api_version='1.21'), tmpfs=dict(docker_api_version='1.22'), volume_driver=dict(docker_api_version='1.21'), memory_reservation=dict(docker_api_version='1.21'), kernel_memory=dict(docker_api_version='1.21'), auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'), healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'), init=dict(docker_py_version='2.2.0', docker_api_version='1.25'), runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'), sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'), userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'), uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'), pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'), mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'), # specials ipvX_address_supported=dict(docker_py_version='1.9.0', detect_usage=detect_ipvX_address_usage, usage_msg='ipv4_address or ipv6_address in networks'), stop_timeout=dict(), # see 
_get_additional_minimal_versions() ) super(AnsibleDockerClientContainer, self).__init__( option_minimal_versions=option_minimal_versions, option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS, **kwargs ) self.image_inspect_source = 'Config' if self.docker_api_version < LooseVersion('1.21'): self.image_inspect_source = 'ContainerConfig' self._get_additional_minimal_versions() self._parse_comparisons() def main(): argument_spec = dict( auto_remove=dict(type='bool', default=False), blkio_weight=dict(type='int'), capabilities=dict(type='list', elements='str'), cap_drop=dict(type='list', elements='str'), cleanup=dict(type='bool', default=False), command=dict(type='raw'), comparisons=dict(type='dict'), cpu_period=dict(type='int'), cpu_quota=dict(type='int'), cpuset_cpus=dict(type='str'), cpuset_mems=dict(type='str'), cpu_shares=dict(type='int'), detach=dict(type='bool', default=True), devices=dict(type='list', elements='str'), device_read_bps=dict(type='list', elements='dict', options=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='str'), )), device_write_bps=dict(type='list', elements='dict', options=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='str'), )), device_read_iops=dict(type='list', elements='dict', options=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='int'), )), device_write_iops=dict(type='list', elements='dict', options=dict( path=dict(required=True, type='str'), rate=dict(required=True, type='int'), )), dns_servers=dict(type='list', elements='str'), dns_opts=dict(type='list', elements='str'), dns_search_domains=dict(type='list', elements='str'), domainname=dict(type='str'), entrypoint=dict(type='list', elements='str'), env=dict(type='dict'), env_file=dict(type='path'), etc_hosts=dict(type='dict'), exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), force_kill=dict(type='bool', default=False, aliases=['forcekill']), 
groups=dict(type='list', elements='str'), healthcheck=dict(type='dict', options=dict( test=dict(type='raw'), interval=dict(type='str'), timeout=dict(type='str'), start_period=dict(type='str'), retries=dict(type='int'), )), hostname=dict(type='str'), ignore_image=dict(type='bool', default=False), image=dict(type='str'), init=dict(type='bool', default=False), interactive=dict(type='bool', default=False), ipc_mode=dict(type='str'), keep_volumes=dict(type='bool', default=True), kernel_memory=dict(type='str'), kill_signal=dict(type='str'), labels=dict(type='dict'), links=dict(type='list', elements='str'), log_driver=dict(type='str'), log_options=dict(type='dict', aliases=['log_opt']), mac_address=dict(type='str'), memory=dict(type='str', default='0'), memory_reservation=dict(type='str'), memory_swap=dict(type='str'), memory_swappiness=dict(type='int'), mounts=dict(type='list', elements='dict', options=dict( target=dict(type='str', required=True), source=dict(type='str'), type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), read_only=dict(type='bool'), consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), no_copy=dict(type='bool'), labels=dict(type='dict'), volume_driver=dict(type='str'), volume_options=dict(type='dict'), tmpfs_size=dict(type='str'), tmpfs_mode=dict(type='str'), )), name=dict(type='str', required=True), network_mode=dict(type='str'), networks=dict(type='list', elements='dict', options=dict( name=dict(type='str', required=True), ipv4_address=dict(type='str'), ipv6_address=dict(type='str'), aliases=dict(type='list', elements='str'), links=dict(type='list', elements='str'), )), networks_cli_compatible=dict(type='bool'), oom_killer=dict(type='bool'), oom_score_adj=dict(type='int'), output_logs=dict(type='bool', default=False), paused=dict(type='bool', default=False), 
pid_mode=dict(type='str'), pids_limit=dict(type='int'), privileged=dict(type='bool', default=False), published_ports=dict(type='list', elements='str', aliases=['ports']), pull=dict(type='bool', default=False), purge_networks=dict(type='bool', default=False), read_only=dict(type='bool', default=False), recreate=dict(type='bool', default=False), restart=dict(type='bool', default=False), restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), restart_retries=dict(type='int'), runtime=dict(type='str'), security_opts=dict(type='list', elements='str'), shm_size=dict(type='str'), state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), stop_signal=dict(type='str'), stop_timeout=dict(type='int'), sysctls=dict(type='dict'), tmpfs=dict(type='list', elements='str'), trust_image_content=dict(type='bool', default=False), tty=dict(type='bool', default=False), ulimits=dict(type='list', elements='str'), user=dict(type='str'), userns_mode=dict(type='str'), uts=dict(type='str'), volume_driver=dict(type='str'), volumes=dict(type='list', elements='str'), volumes_from=dict(type='list', elements='str'), working_dir=dict(type='str'), ) required_if = [ ('state', 'present', ['image']) ] client = AnsibleDockerClientContainer( argument_spec=argument_spec, required_if=required_if, supports_check_mode=True, min_docker_api_version='1.20', ) if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']: client.module.deprecate( 'Please note that docker_container handles networks slightly different than docker CLI. ' 'If you specify networks, the default network will still be attached as the first network. ' '(You can specify purge_networks to remove all networks not explicitly listed.) ' 'This behavior will change in Ansible 2.12. 
You can change the behavior now by setting ' 'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting ' 'it to `no`', version='2.12' ) try: cm = ContainerManager(client) client.module.exit_json(**sanitize_result(cm.results)) except DockerException as e: client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) except RequestException as e: client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,568
net_put and possible other action plugins broken in devel
##### SUMMARY The `net_put` action plugin and likely other action plugins that use the `network_cli` connection plugin in the latest devel branch. I tracked the problem to the following commit: https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 If I checkout the commit that immediately precedes this commit the module functions properly. Note: This also impacts the following PR since I am using the same connection logic as `net_put`. https://github.com/ansible/ansible/pull/60643 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME network_cli plugin ##### ANSIBLE VERSION ```paste below ansible 2.9.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/Users/mwiebe/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/lib/ansible executable location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/bin/ansible python version = 2.7.13 (default, Apr 4 2017, 08:47:57) [GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.38)] ``` ##### CONFIGURATION ```paste below DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/test/integration/targets'] PARAMIKO_HOST_KEY_AUTO_ADD(env: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD) = True PERSISTENT_COMMAND_TIMEOUT(env: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) = 1000 PERSISTENT_CONNECT_TIMEOUT(env: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT) = 1000 ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE I am using the following playbook to demonstrate the problem and running it against an Nexus n9k device. 
```yaml tasks: - name: copy file from ansible controller to a network device net_put: src: '/Users/mwiebe/Projects/nxos_ansible/fix_ansible/test/integration/network-integration.cfg' dest: 'bootflash:' ``` I added a debug tracepoint inside `net_put` to display the problem: Checkout commit that immediately precedes the problem commit: ```git commit 7d3c4a88823846cbcea7c61de38658a6d63d4265 (HEAD) Author: Nathaniel Case <[email protected]> Date: Wed Aug 14 16:58:03 2019 -0400 Delay persistent connection until needed (#59153) * Delay calling connect() until absolutely necessary * Implement transport_test to enable wait_for_connection * plugin might be connected already for some reason? * ensure_connect for httpapi There's some become shenanigans still needing to be ironed out * Fix tests for network_cli commit f02f5c4b5dad36d696f028078ab545d75ba93d31 Author: René Moser <[email protected]> Date: Wed Aug 14 22:55:31 2019 +0200 cloudscale_server: add tags support (#60396) ``` ``` git checkout f02f5c4b5dad36d696f028078ab545d75ba93d31 ``` Add tracepoint and run the test: ```diff diff --git a/lib/ansible/plugins/action/net_put.py b/lib/ansible/plugins/action/net_put.py index bf6dd52d29..6506b7d11b 100644 --- a/lib/ansible/plugins/action/net_put.py +++ b/lib/ansible/plugins/action/net_put.py @@ -98,6 +98,7 @@ class ActionModule(ActionBase): socket_path = self._connection.socket_path conn = Connection(socket_path) + import epdb ; epdb.set_trace() sock_timeout = conn.get_option('persistent_command_timeout') ``` ```diff (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') u'Cisco Nexus Operating System (NX-OS) Software\nTAC support: http://www.cisco.com/tac\nCopyright (C) 2002-2019, Cisco and/or its 
affiliates.\nAll rights reserved.\nThe copyrights to certain works contained in this software are\nowned by other third parties and used and distributed under their own\nlicenses, such as open source. This software is provided "as is," and unless\notherwise stated, there is no warranty, express or implied, including but not\nlimited to warranties of merchantability and fitness for a particular purpose.\nCertain components of this software are licensed under\nthe GNU General Public License (GPL) version 2.0 or \nGNU General Public License (GPL) version 3.0 or the GNU\nLesser General Public License (LGPL) Version 2.1 or \nLesser General Public License (LGPL) Version 2.0. \nA copy of each such license is available at\nhttp://www.opensource.org/licenses/gpl-2.0.php and\nhttp://opensource.org/licenses/gpl-3.0.html and\nhttp://www.opensource.org/licenses/lgpl-2.1.php and\nhttp://www.gnu.org/licenses/old-licenses/library.txt.\n\nSoftware\n BIOS: version 08.34\n NXOS: version 9.3(1) [build 9.2(1)IDI9(0.334)]\n BIOS compile time: 04/26/2018\n NXOS image file is: bootflash:///nxos.glmatthe.bin\n NXOS compile time: 8/28/2019 17:00:00 [08/29/2019 18:32:39]\n\n\nHardware\n cisco Nexus9000 C9504 (4 Slot) Chassis ("Supervisor Module")\n Intel(R) Xeon(R) CPU E5-2403 0 @ 1.80GHz with 16399704 kB of memory.\n Processor Board ID SAL1909A7VC\n\n Device name: n9k-109\n bootflash: 53298520 kB\nKernel uptime is 0 day(s), 0 hour(s), 8 minute(s), 30 second(s)\n\nLast reset at 165683 usecs after Thu Aug 29 19:03:48 2019\n Reason: Reset Requested by CLI command reload\n System version: 9.3(1)\n Service: \n\nplugin\n Core Plugin, Ethernet Plugin\n\nActive Package(s):' (Epdb) ``` As you can see it retrieves the version info properly. 
Now, if I checkout 7d3c4a88823846cbcea7c61de38658a6d63d4265 I see the issue: ``` (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') [127, u'', u'/bin/sh: show: command not found\n'] (Epdb) ``` The exception behind this is: ``` "msg": "Exception received : 'NoneType' object has no attribute '_connect_uncached'" ```
https://github.com/ansible/ansible/issues/61568
https://github.com/ansible/ansible/pull/61570
6e8d430872820d2bcbcb010f092443403317a511
50e09be14f0b055440a3b7df7ed916c8c24bdae2
2019-08-29T19:15:51Z
python
2019-09-09T20:59:20Z
changelogs/fragments/61570-netcli-put-get.yaml
closed
ansible/ansible
https://github.com/ansible/ansible
61,568
net_put and possible other action plugins broken in devel
##### SUMMARY The `net_put` action plugin and likely other action plugins that use the `network_cli` connection plugin in the latest devel branch. I tracked the problem to the following commit: https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 If I checkout the commit that immediately precedes this commit the module functions properly. Note: This also impacts the following PR since I am using the same connection logic as `net_put`. https://github.com/ansible/ansible/pull/60643 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME network_cli plugin ##### ANSIBLE VERSION ```paste below ansible 2.9.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/Users/mwiebe/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/lib/ansible executable location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/bin/ansible python version = 2.7.13 (default, Apr 4 2017, 08:47:57) [GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.38)] ``` ##### CONFIGURATION ```paste below DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/test/integration/targets'] PARAMIKO_HOST_KEY_AUTO_ADD(env: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD) = True PERSISTENT_COMMAND_TIMEOUT(env: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) = 1000 PERSISTENT_CONNECT_TIMEOUT(env: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT) = 1000 ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE I am using the following playbook to demonstrate the problem and running it against an Nexus n9k device. 
```yaml tasks: - name: copy file from ansible controller to a network device net_put: src: '/Users/mwiebe/Projects/nxos_ansible/fix_ansible/test/integration/network-integration.cfg' dest: 'bootflash:' ``` I added a debug tracepoint inside `net_put` to display the problem: Checkout commit that immediately precedes the problem commit: ```git commit 7d3c4a88823846cbcea7c61de38658a6d63d4265 (HEAD) Author: Nathaniel Case <[email protected]> Date: Wed Aug 14 16:58:03 2019 -0400 Delay persistent connection until needed (#59153) * Delay calling connect() until absolutely necessary * Implement transport_test to enable wait_for_connection * plugin might be connected already for some reason? * ensure_connect for httpapi There's some become shenanigans still needing to be ironed out * Fix tests for network_cli commit f02f5c4b5dad36d696f028078ab545d75ba93d31 Author: René Moser <[email protected]> Date: Wed Aug 14 22:55:31 2019 +0200 cloudscale_server: add tags support (#60396) ``` ``` git checkout f02f5c4b5dad36d696f028078ab545d75ba93d31 ``` Add tracepoint and run the test: ```diff diff --git a/lib/ansible/plugins/action/net_put.py b/lib/ansible/plugins/action/net_put.py index bf6dd52d29..6506b7d11b 100644 --- a/lib/ansible/plugins/action/net_put.py +++ b/lib/ansible/plugins/action/net_put.py @@ -98,6 +98,7 @@ class ActionModule(ActionBase): socket_path = self._connection.socket_path conn = Connection(socket_path) + import epdb ; epdb.set_trace() sock_timeout = conn.get_option('persistent_command_timeout') ``` ```diff (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') u'Cisco Nexus Operating System (NX-OS) Software\nTAC support: http://www.cisco.com/tac\nCopyright (C) 2002-2019, Cisco and/or its 
affiliates.\nAll rights reserved.\nThe copyrights to certain works contained in this software are\nowned by other third parties and used and distributed under their own\nlicenses, such as open source. This software is provided "as is," and unless\notherwise stated, there is no warranty, express or implied, including but not\nlimited to warranties of merchantability and fitness for a particular purpose.\nCertain components of this software are licensed under\nthe GNU General Public License (GPL) version 2.0 or \nGNU General Public License (GPL) version 3.0 or the GNU\nLesser General Public License (LGPL) Version 2.1 or \nLesser General Public License (LGPL) Version 2.0. \nA copy of each such license is available at\nhttp://www.opensource.org/licenses/gpl-2.0.php and\nhttp://opensource.org/licenses/gpl-3.0.html and\nhttp://www.opensource.org/licenses/lgpl-2.1.php and\nhttp://www.gnu.org/licenses/old-licenses/library.txt.\n\nSoftware\n BIOS: version 08.34\n NXOS: version 9.3(1) [build 9.2(1)IDI9(0.334)]\n BIOS compile time: 04/26/2018\n NXOS image file is: bootflash:///nxos.glmatthe.bin\n NXOS compile time: 8/28/2019 17:00:00 [08/29/2019 18:32:39]\n\n\nHardware\n cisco Nexus9000 C9504 (4 Slot) Chassis ("Supervisor Module")\n Intel(R) Xeon(R) CPU E5-2403 0 @ 1.80GHz with 16399704 kB of memory.\n Processor Board ID SAL1909A7VC\n\n Device name: n9k-109\n bootflash: 53298520 kB\nKernel uptime is 0 day(s), 0 hour(s), 8 minute(s), 30 second(s)\n\nLast reset at 165683 usecs after Thu Aug 29 19:03:48 2019\n Reason: Reset Requested by CLI command reload\n System version: 9.3(1)\n Service: \n\nplugin\n Core Plugin, Ethernet Plugin\n\nActive Package(s):' (Epdb) ``` As you can see it retrieves the version info properly. 
Now, if I checkout 7d3c4a88823846cbcea7c61de38658a6d63d4265 I see the issue: ``` (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') [127, u'', u'/bin/sh: show: command not found\n'] (Epdb) ``` The exception behind this is: ``` "msg": "Exception received : 'NoneType' object has no attribute '_connect_uncached'" ```
https://github.com/ansible/ansible/issues/61568
https://github.com/ansible/ansible/pull/61570
6e8d430872820d2bcbcb010f092443403317a511
50e09be14f0b055440a3b7df7ed916c8c24bdae2
2019-08-29T19:15:51Z
python
2019-09-09T20:59:20Z
lib/ansible/plugins/action/net_get.py
# (c) 2018, Ansible Inc, # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import copy import os import re import uuid import hashlib from ansible.errors import AnsibleError from ansible.module_utils._text import to_text, to_bytes from ansible.module_utils.connection import Connection from ansible.plugins.action import ActionBase from ansible.module_utils.six.moves.urllib.parse import urlsplit from ansible.utils.display import Display display = Display() class ActionModule(ActionBase): def run(self, tmp=None, task_vars=None): socket_path = None play_context = copy.deepcopy(self._play_context) play_context.network_os = self._get_network_os(task_vars) result = super(ActionModule, self).run(task_vars=task_vars) if play_context.connection != 'network_cli': # It is supported only with network_cli result['failed'] = True result['msg'] = ('please use network_cli connection type for net_get module') return result try: src = self._task.args['src'] except KeyError as exc: return {'failed': True, 'msg': 'missing required argument: %s' % exc} # Get destination file if specified dest = self._task.args.get('dest') if dest is None: dest = self._get_default_dest(src) else: dest = self._handle_dest_path(dest) # Get proto proto = self._task.args.get('protocol') if proto is None: proto = 'scp' sock_timeout 
= play_context.timeout if socket_path is None: socket_path = self._connection.socket_path conn = Connection(socket_path) try: changed = self._handle_existing_file(conn, src, dest, proto, sock_timeout) if changed is False: result['changed'] = False result['destination'] = dest return result except Exception as exc: result['msg'] = ('Warning: exception %s idempotency check failed. Check ' 'dest' % exc) try: out = conn.get_file( source=src, destination=dest, proto=proto, timeout=sock_timeout ) except Exception as exc: result['failed'] = True result['msg'] = ('Exception received : %s' % exc) result['changed'] = True result['destination'] = dest return result def _handle_dest_path(self, dest): working_path = self._get_working_path() if os.path.isabs(dest) or urlsplit('dest').scheme: dst = dest else: dst = self._loader.path_dwim_relative(working_path, '', dest) return dst def _get_src_filename_from_path(self, src_path): filename_list = re.split('/|:', src_path) return filename_list[-1] def _get_default_dest(self, src_path): dest_path = self._get_working_path() src_fname = self._get_src_filename_from_path(src_path) filename = '%s/%s' % (dest_path, src_fname) return filename def _handle_existing_file(self, conn, source, dest, proto, timeout): if not os.path.exists(dest): return True cwd = self._loader.get_basedir() filename = str(uuid.uuid4()) tmp_dest_file = os.path.join(cwd, filename) try: out = conn.get_file( source=source, destination=tmp_dest_file, proto=proto, timeout=timeout ) except Exception as exc: os.remove(tmp_dest_file) raise Exception(exc) try: with open(tmp_dest_file, 'r') as f: new_content = f.read() with open(dest, 'r') as f: old_content = f.read() except (IOError, OSError) as ioexc: raise IOError(ioexc) sha1 = hashlib.sha1() old_content_b = to_bytes(old_content, errors='surrogate_or_strict') sha1.update(old_content_b) checksum_old = sha1.digest() sha1 = hashlib.sha1() new_content_b = to_bytes(new_content, errors='surrogate_or_strict') 
sha1.update(new_content_b) checksum_new = sha1.digest() os.remove(tmp_dest_file) if checksum_old == checksum_new: return False else: return True def _get_working_path(self): cwd = self._loader.get_basedir() if self._task._role is not None: cwd = self._task._role._role_path return cwd def _get_network_os(self, task_vars): if 'network_os' in self._task.args and self._task.args['network_os']: display.vvvv('Getting network OS from task argument') network_os = self._task.args['network_os'] elif self._play_context.network_os: display.vvvv('Getting network OS from inventory') network_os = self._play_context.network_os elif 'network_os' in task_vars.get('ansible_facts', {}) and task_vars['ansible_facts']['network_os']: display.vvvv('Getting network OS from fact') network_os = task_vars['ansible_facts']['network_os'] else: raise AnsibleError('ansible_network_os must be specified on this host') return network_os
closed
ansible/ansible
https://github.com/ansible/ansible
61,568
net_put and possible other action plugins broken in devel
##### SUMMARY The `net_put` action plugin and likely other action plugins that use the `network_cli` connection plugin in the latest devel branch. I tracked the problem to the following commit: https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 If I checkout the commit that immediately precedes this commit the module functions properly. Note: This also impacts the following PR since I am using the same connection logic as `net_put`. https://github.com/ansible/ansible/pull/60643 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME network_cli plugin ##### ANSIBLE VERSION ```paste below ansible 2.9.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/Users/mwiebe/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/lib/ansible executable location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/bin/ansible python version = 2.7.13 (default, Apr 4 2017, 08:47:57) [GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.38)] ``` ##### CONFIGURATION ```paste below DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/test/integration/targets'] PARAMIKO_HOST_KEY_AUTO_ADD(env: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD) = True PERSISTENT_COMMAND_TIMEOUT(env: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) = 1000 PERSISTENT_CONNECT_TIMEOUT(env: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT) = 1000 ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE I am using the following playbook to demonstrate the problem and running it against an Nexus n9k device. 
```yaml tasks: - name: copy file from ansible controller to a network device net_put: src: '/Users/mwiebe/Projects/nxos_ansible/fix_ansible/test/integration/network-integration.cfg' dest: 'bootflash:' ``` I added a debug tracepoint inside `net_put` to display the problem: Checkout commit that immediately precedes the problem commit: ```git commit 7d3c4a88823846cbcea7c61de38658a6d63d4265 (HEAD) Author: Nathaniel Case <[email protected]> Date: Wed Aug 14 16:58:03 2019 -0400 Delay persistent connection until needed (#59153) * Delay calling connect() until absolutely necessary * Implement transport_test to enable wait_for_connection * plugin might be connected already for some reason? * ensure_connect for httpapi There's some become shenanigans still needing to be ironed out * Fix tests for network_cli commit f02f5c4b5dad36d696f028078ab545d75ba93d31 Author: René Moser <[email protected]> Date: Wed Aug 14 22:55:31 2019 +0200 cloudscale_server: add tags support (#60396) ``` ``` git checkout f02f5c4b5dad36d696f028078ab545d75ba93d31 ``` Add tracepoint and run the test: ```diff diff --git a/lib/ansible/plugins/action/net_put.py b/lib/ansible/plugins/action/net_put.py index bf6dd52d29..6506b7d11b 100644 --- a/lib/ansible/plugins/action/net_put.py +++ b/lib/ansible/plugins/action/net_put.py @@ -98,6 +98,7 @@ class ActionModule(ActionBase): socket_path = self._connection.socket_path conn = Connection(socket_path) + import epdb ; epdb.set_trace() sock_timeout = conn.get_option('persistent_command_timeout') ``` ```diff (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') u'Cisco Nexus Operating System (NX-OS) Software\nTAC support: http://www.cisco.com/tac\nCopyright (C) 2002-2019, Cisco and/or its 
affiliates.\nAll rights reserved.\nThe copyrights to certain works contained in this software are\nowned by other third parties and used and distributed under their own\nlicenses, such as open source. This software is provided "as is," and unless\notherwise stated, there is no warranty, express or implied, including but not\nlimited to warranties of merchantability and fitness for a particular purpose.\nCertain components of this software are licensed under\nthe GNU General Public License (GPL) version 2.0 or \nGNU General Public License (GPL) version 3.0 or the GNU\nLesser General Public License (LGPL) Version 2.1 or \nLesser General Public License (LGPL) Version 2.0. \nA copy of each such license is available at\nhttp://www.opensource.org/licenses/gpl-2.0.php and\nhttp://opensource.org/licenses/gpl-3.0.html and\nhttp://www.opensource.org/licenses/lgpl-2.1.php and\nhttp://www.gnu.org/licenses/old-licenses/library.txt.\n\nSoftware\n BIOS: version 08.34\n NXOS: version 9.3(1) [build 9.2(1)IDI9(0.334)]\n BIOS compile time: 04/26/2018\n NXOS image file is: bootflash:///nxos.glmatthe.bin\n NXOS compile time: 8/28/2019 17:00:00 [08/29/2019 18:32:39]\n\n\nHardware\n cisco Nexus9000 C9504 (4 Slot) Chassis ("Supervisor Module")\n Intel(R) Xeon(R) CPU E5-2403 0 @ 1.80GHz with 16399704 kB of memory.\n Processor Board ID SAL1909A7VC\n\n Device name: n9k-109\n bootflash: 53298520 kB\nKernel uptime is 0 day(s), 0 hour(s), 8 minute(s), 30 second(s)\n\nLast reset at 165683 usecs after Thu Aug 29 19:03:48 2019\n Reason: Reset Requested by CLI command reload\n System version: 9.3(1)\n Service: \n\nplugin\n Core Plugin, Ethernet Plugin\n\nActive Package(s):' (Epdb) ``` As you can see it retrieves the version info properly. 
Now, if I checkout 7d3c4a88823846cbcea7c61de38658a6d63d4265 I see the issue: ``` (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') [127, u'', u'/bin/sh: show: command not found\n'] (Epdb) ``` The exception behind this is: ``` "msg": "Exception received : 'NoneType' object has no attribute '_connect_uncached'" ```
https://github.com/ansible/ansible/issues/61568
https://github.com/ansible/ansible/pull/61570
6e8d430872820d2bcbcb010f092443403317a511
50e09be14f0b055440a3b7df7ed916c8c24bdae2
2019-08-29T19:15:51Z
python
2019-09-09T20:59:20Z
lib/ansible/plugins/action/net_put.py
# (c) 2018, Ansible Inc, # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import copy import os import time import uuid import hashlib import sys import re from ansible.errors import AnsibleError from ansible.module_utils._text import to_text, to_bytes from ansible.module_utils.connection import Connection from ansible.plugins.action import ActionBase from ansible.module_utils.six.moves.urllib.parse import urlsplit from ansible.utils.display import Display display = Display() class ActionModule(ActionBase): def run(self, tmp=None, task_vars=None): changed = True socket_path = None play_context = copy.deepcopy(self._play_context) play_context.network_os = self._get_network_os(task_vars) result = super(ActionModule, self).run(task_vars=task_vars) if play_context.connection != 'network_cli': # It is supported only with network_cli result['failed'] = True result['msg'] = ('please use network_cli connection type for net_put module') return result try: src = self._task.args.get('src') except KeyError as exc: return {'failed': True, 'msg': 'missing required argument: %s' % exc} src_file_path_name = src # Get destination file if specified dest = self._task.args.get('dest') # Get proto proto = self._task.args.get('protocol') if proto is None: proto = 'scp' # Get mode if set mode = 
self._task.args.get('mode') if mode is None: mode = 'binary' if mode == 'text': try: self._handle_template(convert_data=False) except ValueError as exc: return dict(failed=True, msg=to_text(exc)) # Now src has resolved file write to disk in current diectory for scp src = self._task.args.get('src') filename = str(uuid.uuid4()) cwd = self._loader.get_basedir() output_file = os.path.join(cwd, filename) try: with open(output_file, 'wb') as f: f.write(to_bytes(src, encoding='utf-8')) except Exception: os.remove(output_file) raise else: try: output_file = self._get_binary_src_file(src) except ValueError as exc: return dict(failed=True, msg=to_text(exc)) if socket_path is None: socket_path = self._connection.socket_path conn = Connection(socket_path) sock_timeout = conn.get_option('persistent_command_timeout') if dest is None: dest = src_file_path_name try: changed = self._handle_existing_file(conn, output_file, dest, proto, sock_timeout) if changed is False: result['changed'] = False result['destination'] = dest return result except Exception as exc: result['msg'] = ('Warning: Exc %s idempotency check failed. Check' 'dest' % exc) try: out = conn.copy_file( source=output_file, destination=dest, proto=proto, timeout=sock_timeout ) except Exception as exc: if to_text(exc) == "No response from server": if play_context.network_os == 'iosxr': # IOSXR sometimes closes socket prematurely after completion # of file transfer result['msg'] = 'Warning: iosxr scp server pre close issue. 
Please check dest' else: result['failed'] = True result['msg'] = ('Exception received : %s' % exc) if mode == 'text': # Cleanup tmp file expanded wih ansible vars os.remove(output_file) result['changed'] = changed result['destination'] = dest return result def _handle_existing_file(self, conn, source, dest, proto, timeout): cwd = self._loader.get_basedir() filename = str(uuid.uuid4()) source_file = os.path.join(cwd, filename) try: out = conn.get_file( source=dest, destination=source_file, proto=proto, timeout=timeout ) except Exception as exc: pattern = to_text(exc) not_found_exc = "No such file or directory" if re.search(not_found_exc, pattern, re.I): if os.path.exists(source_file): os.remove(source_file) return True else: try: os.remove(source_file) except OSError as osex: raise Exception(osex) try: with open(source, 'r') as f: new_content = f.read() with open(source_file, 'r') as f: old_content = f.read() except (IOError, OSError) as ioexc: os.remove(source_file) raise IOError(ioexc) sha1 = hashlib.sha1() old_content_b = to_bytes(old_content, errors='surrogate_or_strict') sha1.update(old_content_b) checksum_old = sha1.digest() sha1 = hashlib.sha1() new_content_b = to_bytes(new_content, errors='surrogate_or_strict') sha1.update(new_content_b) checksum_new = sha1.digest() os.remove(source_file) if checksum_old == checksum_new: return False else: return True def _get_binary_src_file(self, src): working_path = self._get_working_path() if os.path.isabs(src) or urlsplit('src').scheme: source = src else: source = self._loader.path_dwim_relative(working_path, 'templates', src) if not source: source = self._loader.path_dwim_relative(working_path, src) if not os.path.exists(source): raise ValueError('path specified in src not found') return source def _get_working_path(self): cwd = self._loader.get_basedir() if self._task._role is not None: cwd = self._task._role._role_path return cwd def _get_network_os(self, task_vars): if 'network_os' in self._task.args and 
self._task.args['network_os']: display.vvvv('Getting network OS from task argument') network_os = self._task.args['network_os'] elif self._play_context.network_os: display.vvvv('Getting network OS from inventory') network_os = self._play_context.network_os elif 'network_os' in task_vars.get('ansible_facts', {}) and task_vars['ansible_facts']['network_os']: display.vvvv('Getting network OS from fact') network_os = task_vars['ansible_facts']['network_os'] else: raise AnsibleError('ansible_network_os must be specified on this host') return network_os
closed
ansible/ansible
https://github.com/ansible/ansible
61,568
net_put and possible other action plugins broken in devel
##### SUMMARY The `net_put` action plugin and likely other action plugins that use the `network_cli` connection plugin in the latest devel branch. I tracked the problem to the following commit: https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 If I checkout the commit that immediately precedes this commit the module functions properly. Note: This also impacts the following PR since I am using the same connection logic as `net_put`. https://github.com/ansible/ansible/pull/60643 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME network_cli plugin ##### ANSIBLE VERSION ```paste below ansible 2.9.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/Users/mwiebe/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/lib/ansible executable location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/bin/ansible python version = 2.7.13 (default, Apr 4 2017, 08:47:57) [GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.38)] ``` ##### CONFIGURATION ```paste below DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/test/integration/targets'] PARAMIKO_HOST_KEY_AUTO_ADD(env: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD) = True PERSISTENT_COMMAND_TIMEOUT(env: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) = 1000 PERSISTENT_CONNECT_TIMEOUT(env: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT) = 1000 ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE I am using the following playbook to demonstrate the problem and running it against an Nexus n9k device. 
```yaml tasks: - name: copy file from ansible controller to a network device net_put: src: '/Users/mwiebe/Projects/nxos_ansible/fix_ansible/test/integration/network-integration.cfg' dest: 'bootflash:' ``` I added a debug tracepoint inside `net_put` to display the problem: Checkout commit that immediately precedes the problem commit: ```git commit 7d3c4a88823846cbcea7c61de38658a6d63d4265 (HEAD) Author: Nathaniel Case <[email protected]> Date: Wed Aug 14 16:58:03 2019 -0400 Delay persistent connection until needed (#59153) * Delay calling connect() until absolutely necessary * Implement transport_test to enable wait_for_connection * plugin might be connected already for some reason? * ensure_connect for httpapi There's some become shenanigans still needing to be ironed out * Fix tests for network_cli commit f02f5c4b5dad36d696f028078ab545d75ba93d31 Author: René Moser <[email protected]> Date: Wed Aug 14 22:55:31 2019 +0200 cloudscale_server: add tags support (#60396) ``` ``` git checkout f02f5c4b5dad36d696f028078ab545d75ba93d31 ``` Add tracepoint and run the test: ```diff diff --git a/lib/ansible/plugins/action/net_put.py b/lib/ansible/plugins/action/net_put.py index bf6dd52d29..6506b7d11b 100644 --- a/lib/ansible/plugins/action/net_put.py +++ b/lib/ansible/plugins/action/net_put.py @@ -98,6 +98,7 @@ class ActionModule(ActionBase): socket_path = self._connection.socket_path conn = Connection(socket_path) + import epdb ; epdb.set_trace() sock_timeout = conn.get_option('persistent_command_timeout') ``` ```diff (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') u'Cisco Nexus Operating System (NX-OS) Software\nTAC support: http://www.cisco.com/tac\nCopyright (C) 2002-2019, Cisco and/or its 
affiliates.\nAll rights reserved.\nThe copyrights to certain works contained in this software are\nowned by other third parties and used and distributed under their own\nlicenses, such as open source. This software is provided "as is," and unless\notherwise stated, there is no warranty, express or implied, including but not\nlimited to warranties of merchantability and fitness for a particular purpose.\nCertain components of this software are licensed under\nthe GNU General Public License (GPL) version 2.0 or \nGNU General Public License (GPL) version 3.0 or the GNU\nLesser General Public License (LGPL) Version 2.1 or \nLesser General Public License (LGPL) Version 2.0. \nA copy of each such license is available at\nhttp://www.opensource.org/licenses/gpl-2.0.php and\nhttp://opensource.org/licenses/gpl-3.0.html and\nhttp://www.opensource.org/licenses/lgpl-2.1.php and\nhttp://www.gnu.org/licenses/old-licenses/library.txt.\n\nSoftware\n BIOS: version 08.34\n NXOS: version 9.3(1) [build 9.2(1)IDI9(0.334)]\n BIOS compile time: 04/26/2018\n NXOS image file is: bootflash:///nxos.glmatthe.bin\n NXOS compile time: 8/28/2019 17:00:00 [08/29/2019 18:32:39]\n\n\nHardware\n cisco Nexus9000 C9504 (4 Slot) Chassis ("Supervisor Module")\n Intel(R) Xeon(R) CPU E5-2403 0 @ 1.80GHz with 16399704 kB of memory.\n Processor Board ID SAL1909A7VC\n\n Device name: n9k-109\n bootflash: 53298520 kB\nKernel uptime is 0 day(s), 0 hour(s), 8 minute(s), 30 second(s)\n\nLast reset at 165683 usecs after Thu Aug 29 19:03:48 2019\n Reason: Reset Requested by CLI command reload\n System version: 9.3(1)\n Service: \n\nplugin\n Core Plugin, Ethernet Plugin\n\nActive Package(s):' (Epdb) ``` As you can see it retrieves the version info properly. 
Now, if I checkout 7d3c4a88823846cbcea7c61de38658a6d63d4265 I see the issue: ``` (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') [127, u'', u'/bin/sh: show: command not found\n'] (Epdb) ``` The exception behind this is: ``` "msg": "Exception received : 'NoneType' object has no attribute '_connect_uncached'" ```
https://github.com/ansible/ansible/issues/61568
https://github.com/ansible/ansible/pull/61570
6e8d430872820d2bcbcb010f092443403317a511
50e09be14f0b055440a3b7df7ed916c8c24bdae2
2019-08-29T19:15:51Z
python
2019-09-09T20:59:20Z
lib/ansible/plugins/cliconf/__init__.py
# # (c) 2017 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type from abc import abstractmethod from functools import wraps from ansible.plugins import AnsiblePlugin from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.module_utils._text import to_bytes, to_text try: from scp import SCPClient HAS_SCP = True except ImportError: HAS_SCP = False def enable_mode(func): @wraps(func) def wrapped(self, *args, **kwargs): prompt = self._connection.get_prompt() if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'): raise AnsibleError('operation requires privilege escalation') return func(self, *args, **kwargs) return wrapped class CliconfBase(AnsiblePlugin): """ A base class for implementing cli connections .. note:: String inputs to :meth:`send_command` will be cast to byte strings within this method and as such are not required to be made byte strings beforehand. 
Please avoid using literal byte strings (``b'string'``) in :class:`CliConfBase` plugins as this can lead to unexpected errors when running on Python 3 List of supported rpc's: :get_config: Retrieves the specified configuration from the device :edit_config: Loads the specified commands into the remote device :get: Execute specified command on remote device :get_capabilities: Retrieves device information and supported rpc methods :commit: Load configuration from candidate to running :discard_changes: Discard changes to candidate datastore Note: List of supported rpc's for remote device can be extracted from output of get_capabilities() :returns: Returns output received from remote device as byte string Usage: from ansible.module_utils.connection import Connection conn = Connection() conn.get('show lldp neighbors detail'') conn.get_config('running') conn.edit_config(['hostname test', 'netconf ssh']) """ __rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging'] def __init__(self, connection): super(CliconfBase, self).__init__() self._connection = connection self.history = list() self.response_logging = False def _alarm_handler(self, signum, frame): """Alarm handler raised in case of command timeout """ self._connection.queue_message('log', 'closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout) self.close() def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False): """Executes a command over the device connection This method will execute a command over the device connection and return the results to the caller. This method will also perform logging of any commands based on the `nolog` argument. 
:param command: The command to send over the connection to the device :param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command :param answer: The answer to respond with if the prompt is matched. :param sendonly: Bool value that will send the command but not wait for a result. :param newline: Bool value that will append the newline character to the command :param prompt_retry_check: Bool value for trying to detect more prompts :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of given prompt. :returns: The output from the device after executing the command """ kwargs = { 'command': to_bytes(command), 'sendonly': sendonly, 'newline': newline, 'prompt_retry_check': prompt_retry_check, 'check_all': check_all } if prompt is not None: if isinstance(prompt, list): kwargs['prompt'] = [to_bytes(p) for p in prompt] else: kwargs['prompt'] = to_bytes(prompt) if answer is not None: if isinstance(answer, list): kwargs['answer'] = [to_bytes(p) for p in answer] else: kwargs['answer'] = to_bytes(answer) resp = self._connection.send(**kwargs) if not self.response_logging: self.history.append(('*****', '*****')) else: self.history.append((kwargs['command'], resp)) return resp def get_base_rpc(self): """Returns list of base rpc method supported by remote device""" return self.__rpc__ def get_history(self): """ Returns the history file for all commands This will return a log of all the commands that have been sent to the device and all of the output received. By default, all commands and output will be redacted unless explicitly configured otherwise. 
:return: An ordered list of command, output pairs """ return self.history def reset_history(self): """ Resets the history of run commands :return: None """ self.history = list() def enable_response_logging(self): """Enable logging command response""" self.response_logging = True def disable_response_logging(self): """Disable logging command response""" self.response_logging = False @abstractmethod def get_config(self, source='running', flags=None, format=None): """Retrieves the specified configuration from the device This method will retrieve the configuration specified by source and return it to the caller as a string. Subsequent calls to this method will retrieve a new configuration from the device :param source: The configuration source to return from the device. This argument accepts either `running` or `startup` as valid values. :param flags: For devices that support configuration filtering, this keyword argument is used to filter the returned configuration. The use of this keyword argument is device dependent adn will be silently ignored on devices that do not support it. :param format: For devices that support fetching different configuration format, this keyword argument is used to specify the format in which configuration is to be retrieved. :return: The device configuration as specified by the source argument. """ pass @abstractmethod def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None): """Loads the candidate configuration into the network device This method will load the specified candidate config into the device and merge with the current configuration unless replace is set to True. If the device does not support config replace an errors is returned. :param candidate: The configuration to load into the device and merge with the current running configuration :param commit: Boolean value that indicates if the device candidate configuration should be pushed in the running configuration or discarded. 
:param replace: If the value is True/False it indicates if running configuration should be completely replace by candidate configuration. If can also take configuration file path as value, the file in this case should be present on the remote host in the mentioned path as a prerequisite. :param comment: Commit comment provided it is supported by remote host :return: Returns a json string with contains configuration applied on remote host, the returned response on executing configuration commands and platform relevant data. { "diff": "", "response": [], "request": [] } """ pass @abstractmethod def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False): """Execute specified command on remote device This method will retrieve the specified data and return it to the caller as a string. :param command: command in string format to be executed on remote device :param prompt: the expected prompt generated by executing command, this can be a string or a list of strings :param answer: the string to respond to the prompt with :param sendonly: bool to disable waiting for response, default is false :param newline: bool to indicate if newline should be added at end of answer or not :param output: For devices that support fetching command output in different format, this keyword argument is used to specify the output in which response is to be retrieved. :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of given prompt. :return: The output from the device after executing the command """ pass @abstractmethod def get_capabilities(self): """Returns the basic capabilities of the network device This method will provide some basic facts about the device and what capabilities it has to modify the configuration. The minimum return from this method takes the following format. 
eg: { 'rpc': [list of supported rpcs], 'network_api': <str>, # the name of the transport 'device_info': { 'network_os': <str>, 'network_os_version': <str>, 'network_os_model': <str>, 'network_os_hostname': <str>, 'network_os_image': <str>, 'network_os_platform': <str>, }, 'device_operations': { 'supports_diff_replace': <bool>, # identify if config should be merged or replaced is supported 'supports_commit': <bool>, # identify if commit is supported by device or not 'supports_rollback': <bool>, # identify if rollback is supported or not 'supports_defaults': <bool>, # identify if fetching running config with default is supported 'supports_commit_comment': <bool>, # identify if adding comment to commit is supported of not 'supports_onbox_diff: <bool>, # identify if on box diff capability is supported or not 'supports_generate_diff: <bool>, # identify if diff capability is supported within plugin 'supports_multiline_delimiter: <bool>, # identify if multiline demiliter is supported within config 'supports_diff_match: <bool>, # identify if match is supported 'supports_diff_ignore_lines: <bool>, # identify if ignore line in diff is supported 'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported 'supports_admin': <bool>, # identify if admin configure mode is supported or not 'supports_commit_label': <bool>, # identify if commit label is supported or not } 'format': [list of supported configuration format], 'diff_match': [list of supported match values], 'diff_replace': [list of supported replace values], 'output': [list of supported command output format] } :return: capability as json string """ result = {} result['rpc'] = self.get_base_rpc() result['device_info'] = self.get_device_info() result['network_api'] = 'cliconf' return result @abstractmethod def get_device_info(self): """Returns basic information about the network device. 
This method will provide basic information about the device such as OS version and model name. This data is expected to be used to fill the 'device_info' key in get_capabilities() above. :return: dictionary of device information """ pass def commit(self, comment=None): """Commit configuration changes This method will perform the commit operation on a previously loaded candidate configuration that was loaded using `edit_config()`. If there is a candidate configuration, it will be committed to the active configuration. If there is not a candidate configuration, this method should just silently return. :return: None """ return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os) def discard_changes(self): """Discard candidate configuration This method will discard the current candidate configuration if one is present. If there is no candidate configuration currently loaded, then this method should just silently return :returns: None """ return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os) def rollback(self, rollback_id, commit=True): """ :param rollback_id: The commit id to which configuration should be rollbacked :param commit: Flag to indicate if changes should be committed or not :return: Returns diff between before and after change. """ pass def copy_file(self, source=None, destination=None, proto='scp', timeout=30): """Copies file over scp/sftp to remote device :param source: Source file path :param destination: Destination file path on remote device :param proto: Protocol to be used for file transfer, supported protocol: scp and sftp :param timeout: Specifies the wait time to receive response from remote host before triggering timeout exception :return: None """ ssh = self._connection.paramiko_conn._connect_uncached() if proto == 'scp': if not HAS_SCP: raise AnsibleError("Required library scp is not installed. 
Please install it using `pip install scp`") with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp: out = scp.put(source, destination) elif proto == 'sftp': with ssh.open_sftp() as sftp: sftp.put(source, destination) def get_file(self, source=None, destination=None, proto='scp', timeout=30): """Fetch file over scp/sftp from remote device :param source: Source file path :param destination: Destination file path :param proto: Protocol to be used for file transfer, supported protocol: scp and sftp :param timeout: Specifies the wait time to receive response from remote host before triggering timeout exception :return: None """ """Fetch file over scp/sftp from remote device""" ssh = self._connection.paramiko_conn._connect_uncached() if proto == 'scp': if not HAS_SCP: raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`") with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp: scp.get(source, destination) elif proto == 'sftp': with ssh.open_sftp() as sftp: sftp.get(source, destination) def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None): """ Generate diff between candidate and running configuration. If the remote host supports onbox diff capabilities ie. supports_onbox_diff in that case candidate and running configurations are not required to be passed as argument. In case if onbox diff capability is not supported candidate argument is mandatory and running argument is optional. :param candidate: The configuration which is expected to be present on remote host. :param running: The base configuration which is used to generate diff. :param diff_match: Instructs how to match the candidate configuration with current device configuration Valid values are 'line', 'strict', 'exact', 'none'. 
'line' - commands are matched line by line 'strict' - command lines are matched with respect to position 'exact' - command lines must be an equal match 'none' - will not compare the candidate configuration with the running configuration :param diff_ignore_lines: Use this argument to specify one or more lines that should be ignored during the diff. This is used for lines in the configuration that are automatically updated by the system. This argument takes a list of regular expressions or exact line matches. :param path: The ordered set of parents that uniquely identify the section or hierarchy the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands. :param diff_replace: Instructs on the way to perform the configuration on the device. If the replace argument is set to I(line) then the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any line is not correct. :return: Configuration and/or banner diff in json format. { 'config_diff': '' } """ pass def run_commands(self, commands=None, check_rc=True): """ Execute a list of commands on remote host and return the list of response :param commands: The list of command that needs to be executed on remote host. The individual command in list can either be a command string or command dict. If the command is dict the valid keys are { 'command': <command to be executed> 'prompt': <expected prompt on executing the command>, 'answer': <answer for the prompt>, 'output': <the format in which command output should be rendered eg: 'json', 'text'>, 'sendonly': <Boolean flag to indicate if it command execution response should be ignored or not> } :param check_rc: Boolean flag to check if returned response should be checked for error or not. 
If check_rc is False the error output is appended in return response list, else if the value is True an exception is raised. :return: List of returned response """ pass def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None): if not candidate and not replace: raise ValueError("must provide a candidate or replace to load configuration") if commit not in (True, False): raise ValueError("'commit' must be a bool, got %s" % commit) if replace and not operations['supports_replace']: raise ValueError("configuration replace is not supported") if comment and not operations.get('supports_commit_comment', False): raise ValueError("commit comment is not supported") if replace and not operations.get('supports_replace', False): raise ValueError("configuration replace is not supported")
closed
ansible/ansible
https://github.com/ansible/ansible
61,568
net_put and possible other action plugins broken in devel
##### SUMMARY The `net_put` action plugin and likely other action plugins that use the `network_cli` connection plugin in the latest devel branch. I tracked the problem to the following commit: https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 If I checkout the commit that immediately precedes this commit the module functions properly. Note: This also impacts the following PR since I am using the same connection logic as `net_put`. https://github.com/ansible/ansible/pull/60643 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME network_cli plugin ##### ANSIBLE VERSION ```paste below ansible 2.9.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/Users/mwiebe/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/lib/ansible executable location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/bin/ansible python version = 2.7.13 (default, Apr 4 2017, 08:47:57) [GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.38)] ``` ##### CONFIGURATION ```paste below DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/test/integration/targets'] PARAMIKO_HOST_KEY_AUTO_ADD(env: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD) = True PERSISTENT_COMMAND_TIMEOUT(env: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) = 1000 PERSISTENT_CONNECT_TIMEOUT(env: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT) = 1000 ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE I am using the following playbook to demonstrate the problem and running it against an Nexus n9k device. 
```yaml tasks: - name: copy file from ansible controller to a network device net_put: src: '/Users/mwiebe/Projects/nxos_ansible/fix_ansible/test/integration/network-integration.cfg' dest: 'bootflash:' ``` I added a debug tracepoint inside `net_put` to display the problem: Checkout commit that immediately precedes the problem commit: ```git commit 7d3c4a88823846cbcea7c61de38658a6d63d4265 (HEAD) Author: Nathaniel Case <[email protected]> Date: Wed Aug 14 16:58:03 2019 -0400 Delay persistent connection until needed (#59153) * Delay calling connect() until absolutely necessary * Implement transport_test to enable wait_for_connection * plugin might be connected already for some reason? * ensure_connect for httpapi There's some become shenanigans still needing to be ironed out * Fix tests for network_cli commit f02f5c4b5dad36d696f028078ab545d75ba93d31 Author: René Moser <[email protected]> Date: Wed Aug 14 22:55:31 2019 +0200 cloudscale_server: add tags support (#60396) ``` ``` git checkout f02f5c4b5dad36d696f028078ab545d75ba93d31 ``` Add tracepoint and run the test: ```diff diff --git a/lib/ansible/plugins/action/net_put.py b/lib/ansible/plugins/action/net_put.py index bf6dd52d29..6506b7d11b 100644 --- a/lib/ansible/plugins/action/net_put.py +++ b/lib/ansible/plugins/action/net_put.py @@ -98,6 +98,7 @@ class ActionModule(ActionBase): socket_path = self._connection.socket_path conn = Connection(socket_path) + import epdb ; epdb.set_trace() sock_timeout = conn.get_option('persistent_command_timeout') ``` ```diff (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') u'Cisco Nexus Operating System (NX-OS) Software\nTAC support: http://www.cisco.com/tac\nCopyright (C) 2002-2019, Cisco and/or its 
affiliates.\nAll rights reserved.\nThe copyrights to certain works contained in this software are\nowned by other third parties and used and distributed under their own\nlicenses, such as open source. This software is provided "as is," and unless\notherwise stated, there is no warranty, express or implied, including but not\nlimited to warranties of merchantability and fitness for a particular purpose.\nCertain components of this software are licensed under\nthe GNU General Public License (GPL) version 2.0 or \nGNU General Public License (GPL) version 3.0 or the GNU\nLesser General Public License (LGPL) Version 2.1 or \nLesser General Public License (LGPL) Version 2.0. \nA copy of each such license is available at\nhttp://www.opensource.org/licenses/gpl-2.0.php and\nhttp://opensource.org/licenses/gpl-3.0.html and\nhttp://www.opensource.org/licenses/lgpl-2.1.php and\nhttp://www.gnu.org/licenses/old-licenses/library.txt.\n\nSoftware\n BIOS: version 08.34\n NXOS: version 9.3(1) [build 9.2(1)IDI9(0.334)]\n BIOS compile time: 04/26/2018\n NXOS image file is: bootflash:///nxos.glmatthe.bin\n NXOS compile time: 8/28/2019 17:00:00 [08/29/2019 18:32:39]\n\n\nHardware\n cisco Nexus9000 C9504 (4 Slot) Chassis ("Supervisor Module")\n Intel(R) Xeon(R) CPU E5-2403 0 @ 1.80GHz with 16399704 kB of memory.\n Processor Board ID SAL1909A7VC\n\n Device name: n9k-109\n bootflash: 53298520 kB\nKernel uptime is 0 day(s), 0 hour(s), 8 minute(s), 30 second(s)\n\nLast reset at 165683 usecs after Thu Aug 29 19:03:48 2019\n Reason: Reset Requested by CLI command reload\n System version: 9.3(1)\n Service: \n\nplugin\n Core Plugin, Ethernet Plugin\n\nActive Package(s):' (Epdb) ``` As you can see it retrieves the version info properly. 
Now, if I checkout 7d3c4a88823846cbcea7c61de38658a6d63d4265 I see the issue: ``` (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') [127, u'', u'/bin/sh: show: command not found\n'] (Epdb) ``` The exception behind this is: ``` "msg": "Exception received : 'NoneType' object has no attribute '_connect_uncached'" ```
https://github.com/ansible/ansible/issues/61568
https://github.com/ansible/ansible/pull/61570
6e8d430872820d2bcbcb010f092443403317a511
50e09be14f0b055440a3b7df7ed916c8c24bdae2
2019-08-29T19:15:51Z
python
2019-09-09T20:59:20Z
lib/ansible/plugins/connection/network_cli.py
# (c) 2016 Red Hat Inc. # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ --- author: Ansible Networking Team connection: network_cli short_description: Use network_cli to run command on network appliances description: - This connection plugin provides a connection to remote devices over the SSH and implements a CLI shell. This connection plugin is typically used by network devices for sending and receiving CLi commands to network devices. version_added: "2.3" options: host: description: - Specifies the remote device FQDN or IP address to establish the SSH connection to. default: inventory_hostname vars: - name: ansible_host port: type: int description: - Specifies the port on the remote device that listens for connections when establishing the SSH connection. default: 22 ini: - section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port network_os: description: - Configures the device platform network operating system. This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. vars: - name: ansible_network_os remote_user: description: - The username used to authenticate to the remote device when the SSH connection is first established. If the remote_user is not specified, the connection will use the username of the logged in user. - Can be configured from the CLI via the C(--user) or C(-u) options. ini: - section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: - Configures the user password used to authenticate to the remote device when first establishing the SSH connection. 
vars: - name: ansible_password - name: ansible_ssh_pass - name: ansible_ssh_password private_key_file: description: - The private SSH key or certificate file used to authenticate to the remote device when first establishing the SSH connection. ini: - section: defaults key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file timeout: type: int description: - Sets the connection time, in seconds, for communicating with the remote device. This timeout is used as the default timeout value for commands when issuing a command to the network CLI. If the command does not return in timeout seconds, an error is generated. default: 120 become: type: boolean description: - The become option will instruct the CLI session to attempt privilege escalation on platforms that support it. Normally this means transitioning from user mode to C(enable) mode in the CLI session. If become is set to True and the remote device does not support privilege escalation or the privilege has already been elevated, then this option is silently ignored. - Can be configured from the CLI via the C(--become) or C(-b) options. default: False ini: - section: privilege_escalation key: become env: - name: ANSIBLE_BECOME vars: - name: ansible_become become_method: description: - This option allows the become method to be specified in for handling privilege escalation. Typically the become_method value is set to C(enable) but could be defined as other values. default: sudo ini: - section: privilege_escalation key: become_method env: - name: ANSIBLE_BECOME_METHOD vars: - name: ansible_become_method host_key_auto_add: type: boolean description: - By default, Ansible will prompt the user before adding SSH keys to the known hosts file. Since persistent connections such as network_cli run in background processes, the user will never be prompted. By enabling this option, unknown host keys will automatically be added to the known hosts file. 
- Be sure to fully understand the security implications of enabling this option on production systems as it could create a security vulnerability. default: False ini: - section: paramiko_connection key: host_key_auto_add env: - name: ANSIBLE_HOST_KEY_AUTO_ADD persistent_connect_timeout: type: int description: - Configures, in seconds, the amount of time to wait when trying to initially establish a persistent connection. If this value expires before the connection to the remote device is completed, the connection will fail. default: 30 ini: - section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures, in seconds, the amount of time to wait for a command to return from the remote device. If this timer is exceeded before the command returns, the connection plugin will raise an exception and close. default: 30 ini: - section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_buffer_read_timeout: type: float description: - Configures, in seconds, the amount of time to wait for the data to be read from Paramiko channel after the command prompt is matched. This timeout value ensures that command prompt matched is correct and there is no more data left to be received from remote host. default: 0.1 ini: - section: persistent_connection key: buffer_read_timeout env: - name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT vars: - name: ansible_buffer_read_timeout persistent_log_messages: type: boolean description: - This flag will enable logging the command executed and response received from target device in the ansible log file. For this option to work 'log_path' ansible configuration option is required to be set to a file path with write access. 
- Be sure to fully understand the security implications of enabling this option as it could create a security vulnerability by logging sensitive information in log file. default: False ini: - section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages terminal_stdout_re: type: list elements: dict version_added: '2.9' description: - A single regex pattern or a sequence of patterns along with optional flags to match the command prompt from the received response chunk. This option accepts C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern to match the response and the value of C(flags) is the value accepted by I(flags) argument of I(re.compile) python method to control the way regex is matched with the response, for example I('re.I'). vars: - name: ansible_terminal_stdout_re terminal_stderr_re: type: list elements: dict version_added: '2.9' description: - This option provides the regex pattern and optional flags to match the error string from the received response chunk. This option accepts C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern to match the response and the value of C(flags) is the value accepted by I(flags) argument of I(re.compile) python method to control the way regex is matched with the response, for example I('re.I'). vars: - name: ansible_terminal_stderr_re terminal_initial_prompt: type: list version_added: '2.9' description: - A single regex pattern or a sequence of patterns to evaluate the expected prompt at the time of initial login to the remote host. vars: - name: ansible_terminal_initial_prompt terminal_initial_answer: type: list version_added: '2.9' description: - The answer to reply with if the C(terminal_initial_prompt) is matched. The value can be a single answer or a list of answers for multiple terminal_initial_prompt. 
In case the login menu has multiple prompts the sequence of the prompt and excepted answer should be in same order and the value of I(terminal_prompt_checkall) should be set to I(True) if all the values in C(terminal_initial_prompt) are expected to be matched and set to I(False) if any one login prompt is to be matched. vars: - name: ansible_terminal_initial_answer terminal_initial_prompt_checkall: type: boolean version_added: '2.9' description: - By default the value is set to I(False) and any one of the prompts mentioned in C(terminal_initial_prompt) option is matched it won't check for other prompts. When set to I(True) it will check for all the prompts mentioned in C(terminal_initial_prompt) option in the given order and all the prompts should be received from remote host if not it will result in timeout. default: False vars: - name: ansible_terminal_initial_prompt_checkall terminal_inital_prompt_newline: type: boolean version_added: '2.9' description: - This boolean flag, that when set to I(True) will send newline in the response if any of values in I(terminal_initial_prompt) is matched. default: True vars: - name: ansible_terminal_initial_prompt_newline network_cli_retries: description: - Number of attempts to connect to remote host. The delay time between the retires increases after every attempt by power of 2 in seconds till either the maximum attempts are exhausted or any of the C(persistent_command_timeout) or C(persistent_connect_timeout) timers are triggered. 
default: 3 version_added: '2.9' type: integer env: - name: ANSIBLE_NETWORK_CLI_RETRIES ini: - section: persistent_connection key: network_cli_retries vars: - name: ansible_network_cli_retries """ import getpass import json import logging import re import os import signal import socket import time import traceback from io import BytesIO from ansible.errors import AnsibleConnectionFailure from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves import cPickle from ansible.module_utils.network.common.utils import to_list from ansible.module_utils._text import to_bytes, to_text from ansible.playbook.play_context import PlayContext from ansible.plugins.connection import NetworkConnectionBase, ensure_connect from ansible.plugins.loader import cliconf_loader, terminal_loader, connection_loader class AnsibleCmdRespRecv(Exception): pass class Connection(NetworkConnectionBase): ''' CLI (shell) SSH connections on Paramiko ''' transport = 'network_cli' has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) self._ssh_shell = None self._matched_prompt = None self._matched_cmd_prompt = None self._matched_pattern = None self._last_response = None self._history = list() self._command_response = None self._terminal = None self.cliconf = None self.paramiko_conn = None if self._play_context.verbosity > 3: logging.getLogger('paramiko').setLevel(logging.DEBUG) if self._network_os: self._terminal = terminal_loader.get(self._network_os, self) if not self._terminal: raise AnsibleConnectionFailure('network os %s is not supported' % self._network_os) self.cliconf = cliconf_loader.get(self._network_os, self) if self.cliconf: self.queue_message('vvvv', 'loaded cliconf plugin for network_os %s' % self._network_os) self._sub_plugin = {'type': 'cliconf', 'name': self._network_os, 'obj': self.cliconf} else: self.queue_message('vvvv', 'unable to load cliconf for network_os %s' 
% self._network_os) else: raise AnsibleConnectionFailure( 'Unable to automatically determine host network os. Please ' 'manually configure ansible_network_os value for this host' ) self.queue_message('log', 'network_os is set to %s' % self._network_os) def _get_log_channel(self): name = "p=%s u=%s | " % (os.getpid(), getpass.getuser()) name += "paramiko [%s]" % self._play_context.remote_addr return name @ensure_connect def get_prompt(self): """Returns the current prompt from the device""" return self._matched_prompt def exec_command(self, cmd, in_data=None, sudoable=True): # this try..except block is just to handle the transition to supporting # network_cli as a toplevel connection. Once connection=local is gone, # this block can be removed as well and all calls passed directly to # the local connection if self._ssh_shell: try: cmd = json.loads(to_text(cmd, errors='surrogate_or_strict')) kwargs = {'command': to_bytes(cmd['command'], errors='surrogate_or_strict')} for key in ('prompt', 'answer', 'sendonly', 'newline', 'prompt_retry_check'): if cmd.get(key) is True or cmd.get(key) is False: kwargs[key] = cmd[key] elif cmd.get(key) is not None: kwargs[key] = to_bytes(cmd[key], errors='surrogate_or_strict') return self.send(**kwargs) except ValueError: cmd = to_bytes(cmd, errors='surrogate_or_strict') return self.send(command=cmd) else: return super(Connection, self).exec_command(cmd, in_data, sudoable) def update_play_context(self, pc_data): """Updates the play context information for the connection""" pc_data = to_bytes(pc_data) if PY3: pc_data = cPickle.loads(pc_data, encoding='bytes') else: pc_data = cPickle.loads(pc_data) play_context = PlayContext() play_context.deserialize(pc_data) self.queue_message('vvvv', 'updating play_context for connection') if self._play_context.become ^ play_context.become: if play_context.become is True: auth_pass = play_context.become_pass self._terminal.on_become(passwd=auth_pass) self.queue_message('vvvv', 'authorizing connection') 
else: self._terminal.on_unbecome() self.queue_message('vvvv', 'deauthorizing connection') self._play_context = play_context if hasattr(self, 'reset_history'): self.reset_history() if hasattr(self, 'disable_response_logging'): self.disable_response_logging() def _connect(self): ''' Connects to the remote device and starts the terminal ''' if not self.connected: self.paramiko_conn = connection_loader.get('paramiko', self._play_context, '/dev/null') self.paramiko_conn._set_log_channel(self._get_log_channel()) self.paramiko_conn.set_options(direct={'look_for_keys': not bool(self._play_context.password and not self._play_context.private_key_file)}) self.paramiko_conn.force_persistence = self.force_persistence command_timeout = self.get_option('persistent_command_timeout') max_pause = min([self.get_option('persistent_connect_timeout'), command_timeout]) retries = self.get_option('network_cli_retries') total_pause = 0 for attempt in range(retries + 1): try: ssh = self.paramiko_conn._connect() break except Exception as e: pause = 2 ** (attempt + 1) if attempt == retries or total_pause >= max_pause: raise AnsibleConnectionFailure(to_text(e, errors='surrogate_or_strict')) else: msg = (u"network_cli_retry: attempt: %d, caught exception(%s), " u"pausing for %d seconds" % (attempt + 1, to_text(e, errors='surrogate_or_strict'), pause)) self.queue_message('vv', msg) time.sleep(pause) total_pause += pause continue self.queue_message('vvvv', 'ssh connection done, setting terminal') self._connected = True self._ssh_shell = ssh.ssh.invoke_shell() self._ssh_shell.settimeout(command_timeout) self.queue_message('vvvv', 'loaded terminal plugin for network_os %s' % self._network_os) terminal_initial_prompt = self.get_option('terminal_initial_prompt') or self._terminal.terminal_initial_prompt terminal_initial_answer = self.get_option('terminal_initial_answer') or self._terminal.terminal_initial_answer newline = self.get_option('terminal_inital_prompt_newline') or 
self._terminal.terminal_inital_prompt_newline check_all = self.get_option('terminal_initial_prompt_checkall') or False self.receive(prompts=terminal_initial_prompt, answer=terminal_initial_answer, newline=newline, check_all=check_all) self.queue_message('vvvv', 'firing event: on_open_shell()') self._terminal.on_open_shell() if self._play_context.become and self._play_context.become_method == 'enable': self.queue_message('vvvv', 'firing event: on_become') auth_pass = self._play_context.become_pass self._terminal.on_become(passwd=auth_pass) self.queue_message('vvvv', 'ssh connection has completed successfully') return self def close(self): ''' Close the active connection to the device ''' # only close the connection if its connected. if self._connected: self.queue_message('debug', "closing ssh connection to device") if self._ssh_shell: self.queue_message('debug', "firing event: on_close_shell()") self._terminal.on_close_shell() self._ssh_shell.close() self._ssh_shell = None self.queue_message('debug', "cli session is now closed") self.paramiko_conn.close() self.paramiko_conn = None self.queue_message('debug', "ssh connection has been closed successfully") super(Connection, self).close() def receive(self, command=None, prompts=None, answer=None, newline=True, prompt_retry_check=False, check_all=False): ''' Handles receiving of output from command ''' self._matched_prompt = None self._matched_cmd_prompt = None recv = BytesIO() handled = False command_prompt_matched = False matched_prompt_window = window_count = 0 # set terminal regex values for command prompt and errors in response self._terminal_stderr_re = self._get_terminal_std_re('terminal_stderr_re') self._terminal_stdout_re = self._get_terminal_std_re('terminal_stdout_re') cache_socket_timeout = self._ssh_shell.gettimeout() command_timeout = self.get_option('persistent_command_timeout') self._validate_timeout_value(command_timeout, "persistent_command_timeout") if cache_socket_timeout != command_timeout: 
self._ssh_shell.settimeout(command_timeout) buffer_read_timeout = self.get_option('persistent_buffer_read_timeout') self._validate_timeout_value(buffer_read_timeout, "persistent_buffer_read_timeout") self._log_messages("command: %s" % command) while True: if command_prompt_matched: try: signal.signal(signal.SIGALRM, self._handle_buffer_read_timeout) signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout) data = self._ssh_shell.recv(256) signal.alarm(0) self._log_messages("response-%s: %s" % (window_count + 1, data)) # if data is still received on channel it indicates the prompt string # is wrongly matched in between response chunks, continue to read # remaining response. command_prompt_matched = False # restart command_timeout timer signal.signal(signal.SIGALRM, self._handle_command_timeout) signal.alarm(command_timeout) except AnsibleCmdRespRecv: # reset socket timeout to global timeout self._ssh_shell.settimeout(cache_socket_timeout) return self._command_response else: data = self._ssh_shell.recv(256) self._log_messages("response-%s: %s" % (window_count + 1, data)) # when a channel stream is closed, received data will be empty if not data: break recv.write(data) offset = recv.tell() - 256 if recv.tell() > 256 else 0 recv.seek(offset) window = self._strip(recv.read()) window_count += 1 if prompts and not handled: handled = self._handle_prompt(window, prompts, answer, newline, False, check_all) matched_prompt_window = window_count elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count: # check again even when handled, if same prompt repeats in next window # (like in the case of a wrong enable password, etc) indicates # value of answer is wrong, report this as error. 
if self._handle_prompt(window, prompts, answer, newline, prompt_retry_check, check_all): raise AnsibleConnectionFailure("For matched prompt '%s', answer is not valid" % self._matched_cmd_prompt) if self._find_prompt(window): self._last_response = recv.getvalue() resp = self._strip(self._last_response) self._command_response = self._sanitize(resp, command) if buffer_read_timeout == 0.0: # reset socket timeout to global timeout self._ssh_shell.settimeout(cache_socket_timeout) return self._command_response else: command_prompt_matched = True @ensure_connect def send(self, command, prompt=None, answer=None, newline=True, sendonly=False, prompt_retry_check=False, check_all=False): ''' Sends the command to the device in the opened shell ''' if check_all: prompt_len = len(to_list(prompt)) answer_len = len(to_list(answer)) if prompt_len != answer_len: raise AnsibleConnectionFailure("Number of prompts (%s) is not same as that of answers (%s)" % (prompt_len, answer_len)) try: cmd = b'%s\r' % command self._history.append(cmd) self._ssh_shell.sendall(cmd) self._log_messages('send command: %s' % cmd) if sendonly: return response = self.receive(command, prompt, answer, newline, prompt_retry_check, check_all) return to_text(response, errors='surrogate_or_strict') except (socket.timeout, AttributeError): self.queue_message('error', traceback.format_exc()) raise AnsibleConnectionFailure("timeout value %s seconds reached while trying to send command: %s" % (self._ssh_shell.gettimeout(), command.strip())) def _handle_buffer_read_timeout(self, signum, frame): self.queue_message('vvvv', "Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds" % self.get_option('persistent_buffer_read_timeout')) raise AnsibleCmdRespRecv() def _handle_command_timeout(self, signum, frame): msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\ % self.get_option('persistent_command_timeout') 
self.queue_message('log', msg) raise AnsibleConnectionFailure(msg) def _strip(self, data): ''' Removes ANSI codes from device response ''' for regex in self._terminal.ansi_re: data = regex.sub(b'', data) return data def _handle_prompt(self, resp, prompts, answer, newline, prompt_retry_check=False, check_all=False): ''' Matches the command prompt and responds :arg resp: Byte string containing the raw response from the remote :arg prompts: Sequence of byte strings that we consider prompts for input :arg answer: Sequence of Byte string to send back to the remote if we find a prompt. A carriage return is automatically appended to this string. :param prompt_retry_check: Bool value for trying to detect more prompts :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of given prompt. :returns: True if a prompt was found in ``resp``. If check_all is True will True only after all the prompt in the prompts list are matched. False otherwise. ''' single_prompt = False if not isinstance(prompts, list): prompts = [prompts] single_prompt = True if not isinstance(answer, list): answer = [answer] prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts] for index, regex in enumerate(prompts_regex): match = regex.search(resp) if match: self._matched_cmd_prompt = match.group() self._log_messages("matched command prompt: %s" % self._matched_cmd_prompt) # if prompt_retry_check is enabled to check if same prompt is # repeated don't send answer again. 
if not prompt_retry_check: prompt_answer = answer[index] if len(answer) > index else answer[0] self._ssh_shell.sendall(b'%s' % prompt_answer) if newline: self._ssh_shell.sendall(b'\r') prompt_answer += b'\r' self._log_messages("matched command prompt answer: %s" % prompt_answer) if check_all and prompts and not single_prompt: prompts.pop(0) answer.pop(0) return False return True return False def _sanitize(self, resp, command=None): ''' Removes elements from the response before returning to the caller ''' cleaned = [] for line in resp.splitlines(): if command and line.strip() == command.strip(): continue for prompt in self._matched_prompt.strip().splitlines(): if prompt.strip() in line: break else: cleaned.append(line) return b'\n'.join(cleaned).strip() def _find_prompt(self, response): '''Searches the buffered response for a matching command prompt ''' errored_response = None is_error_message = False for regex in self._terminal_stderr_re: if regex.search(response): is_error_message = True # Check if error response ends with command prompt if not # receive it buffered prompt for regex in self._terminal_stdout_re: match = regex.search(response) if match: errored_response = response self._matched_pattern = regex.pattern self._matched_prompt = match.group() self._log_messages("matched error regex '%s' from response '%s'" % (self._matched_pattern, errored_response)) break if not is_error_message: for regex in self._terminal_stdout_re: match = regex.search(response) if match: self._matched_pattern = regex.pattern self._matched_prompt = match.group() self._log_messages("matched cli prompt '%s' with regex '%s' from response '%s'" % (self._matched_prompt, self._matched_pattern, response)) if not errored_response: return True if errored_response: raise AnsibleConnectionFailure(errored_response) return False def _validate_timeout_value(self, timeout, timer_name): if timeout < 0: raise AnsibleConnectionFailure("'%s' timer value '%s' is invalid, value should be greater than or 
equal to zero." % (timer_name, timeout)) def transport_test(self, connect_timeout): """This method enables wait_for_connection to work. As it is used by wait_for_connection, it is called by that module's action plugin, which is on the controller process, which means that nothing done on this instance should impact the actual persistent connection... this check is for informational purposes only and should be properly cleaned up. """ # Force a fresh connect if for some reason we have connected before. self.close() self._connect() self.close() def _get_terminal_std_re(self, option): terminal_std_option = self.get_option(option) terminal_std_re = [] if terminal_std_option: for item in terminal_std_option: if "pattern" not in item: raise AnsibleConnectionFailure("'pattern' is a required key for option '%s'," " received option value is %s" % (option, item)) pattern = br"%s" % to_bytes(item['pattern']) flag = item.get('flags', 0) if flag: flag = getattr(re, flag.split('.')[1]) terminal_std_re.append(re.compile(pattern, flag)) else: # To maintain backward compatibility terminal_std_re = getattr(self._terminal, option) return terminal_std_re
closed
ansible/ansible
https://github.com/ansible/ansible
61,568
net_put and possible other action plugins broken in devel
##### SUMMARY The `net_put` action plugin and likely other action plugins that use the `network_cli` connection plugin in the latest devel branch. I tracked the problem to the following commit: https://github.com/ansible/ansible/commit/7d3c4a88823846cbcea7c61de38658a6d63d4265 If I checkout the commit that immediately precedes this commit the module functions properly. Note: This also impacts the following PR since I am using the same connection logic as `net_put`. https://github.com/ansible/ansible/pull/60643 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME network_cli plugin ##### ANSIBLE VERSION ```paste below ansible 2.9.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/Users/mwiebe/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/lib/ansible executable location = /Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/bin/ansible python version = 2.7.13 (default, Apr 4 2017, 08:47:57) [GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.38)] ``` ##### CONFIGURATION ```paste below DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/Users/mwiebe/Projects/nxos_ansible/ansible_sanity_check/test/integration/targets'] PARAMIKO_HOST_KEY_AUTO_ADD(env: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD) = True PERSISTENT_COMMAND_TIMEOUT(env: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) = 1000 PERSISTENT_CONNECT_TIMEOUT(env: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT) = 1000 ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE I am using the following playbook to demonstrate the problem and running it against an Nexus n9k device. 
```yaml tasks: - name: copy file from ansible controller to a network device net_put: src: '/Users/mwiebe/Projects/nxos_ansible/fix_ansible/test/integration/network-integration.cfg' dest: 'bootflash:' ``` I added a debug tracepoint inside `net_put` to display the problem: Checkout commit that immediately precedes the problem commit: ```git commit 7d3c4a88823846cbcea7c61de38658a6d63d4265 (HEAD) Author: Nathaniel Case <[email protected]> Date: Wed Aug 14 16:58:03 2019 -0400 Delay persistent connection until needed (#59153) * Delay calling connect() until absolutely necessary * Implement transport_test to enable wait_for_connection * plugin might be connected already for some reason? * ensure_connect for httpapi There's some become shenanigans still needing to be ironed out * Fix tests for network_cli commit f02f5c4b5dad36d696f028078ab545d75ba93d31 Author: René Moser <[email protected]> Date: Wed Aug 14 22:55:31 2019 +0200 cloudscale_server: add tags support (#60396) ``` ``` git checkout f02f5c4b5dad36d696f028078ab545d75ba93d31 ``` Add tracepoint and run the test: ```diff diff --git a/lib/ansible/plugins/action/net_put.py b/lib/ansible/plugins/action/net_put.py index bf6dd52d29..6506b7d11b 100644 --- a/lib/ansible/plugins/action/net_put.py +++ b/lib/ansible/plugins/action/net_put.py @@ -98,6 +98,7 @@ class ActionModule(ActionBase): socket_path = self._connection.socket_path conn = Connection(socket_path) + import epdb ; epdb.set_trace() sock_timeout = conn.get_option('persistent_command_timeout') ``` ```diff (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') u'Cisco Nexus Operating System (NX-OS) Software\nTAC support: http://www.cisco.com/tac\nCopyright (C) 2002-2019, Cisco and/or its 
affiliates.\nAll rights reserved.\nThe copyrights to certain works contained in this software are\nowned by other third parties and used and distributed under their own\nlicenses, such as open source. This software is provided "as is," and unless\notherwise stated, there is no warranty, express or implied, including but not\nlimited to warranties of merchantability and fitness for a particular purpose.\nCertain components of this software are licensed under\nthe GNU General Public License (GPL) version 2.0 or \nGNU General Public License (GPL) version 3.0 or the GNU\nLesser General Public License (LGPL) Version 2.1 or \nLesser General Public License (LGPL) Version 2.0. \nA copy of each such license is available at\nhttp://www.opensource.org/licenses/gpl-2.0.php and\nhttp://opensource.org/licenses/gpl-3.0.html and\nhttp://www.opensource.org/licenses/lgpl-2.1.php and\nhttp://www.gnu.org/licenses/old-licenses/library.txt.\n\nSoftware\n BIOS: version 08.34\n NXOS: version 9.3(1) [build 9.2(1)IDI9(0.334)]\n BIOS compile time: 04/26/2018\n NXOS image file is: bootflash:///nxos.glmatthe.bin\n NXOS compile time: 8/28/2019 17:00:00 [08/29/2019 18:32:39]\n\n\nHardware\n cisco Nexus9000 C9504 (4 Slot) Chassis ("Supervisor Module")\n Intel(R) Xeon(R) CPU E5-2403 0 @ 1.80GHz with 16399704 kB of memory.\n Processor Board ID SAL1909A7VC\n\n Device name: n9k-109\n bootflash: 53298520 kB\nKernel uptime is 0 day(s), 0 hour(s), 8 minute(s), 30 second(s)\n\nLast reset at 165683 usecs after Thu Aug 29 19:03:48 2019\n Reason: Reset Requested by CLI command reload\n System version: 9.3(1)\n Service: \n\nplugin\n Core Plugin, Ethernet Plugin\n\nActive Package(s):' (Epdb) ``` As you can see it retrieves the version info properly. 
Now, if I checkout 7d3c4a88823846cbcea7c61de38658a6d63d4265 I see the issue: ``` (Epdb) list 97 if socket_path is None: 98 socket_path = self._connection.socket_path 99 100 conn = Connection(socket_path) 101 import epdb ; epdb.set_trace() 102 -> sock_timeout = conn.get_option('persistent_command_timeout') 103 104 if dest is None: 105 dest = src_file_path_name 106 107 try: (Epdb) conn.exec_command('show version') [127, u'', u'/bin/sh: show: command not found\n'] (Epdb) ``` The exception behind this is: ``` "msg": "Exception received : 'NoneType' object has no attribute '_connect_uncached'" ```
https://github.com/ansible/ansible/issues/61568
https://github.com/ansible/ansible/pull/61570
6e8d430872820d2bcbcb010f092443403317a511
50e09be14f0b055440a3b7df7ed916c8c24bdae2
2019-08-29T19:15:51Z
python
2019-09-09T20:59:20Z
test/units/plugins/connection/test_network_cli.py
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import json from units.compat import unittest from units.compat.mock import patch, MagicMock from ansible.module_utils._text import to_text from ansible.errors import AnsibleConnectionFailure from ansible.playbook.play_context import PlayContext from ansible.plugins.loader import connection_loader class TestConnectionClass(unittest.TestCase): def test_network_cli__invalid_os(self): pc = PlayContext() pc.network_os = 'does not exist' self.assertRaises(AnsibleConnectionFailure, connection_loader.get, 'network_cli', pc, '/dev/null') def test_network_cli__no_os(self): pc = PlayContext() pc.network_os = None self.assertRaises(AnsibleConnectionFailure, connection_loader.get, 'network_cli', pc, '/dev/null') @patch("ansible.plugins.connection.network_cli.terminal_loader") @patch("ansible.plugins.connection.paramiko_ssh.Connection._connect") def test_network_cli__connect(self, mocked_super, mocked_terminal_loader): pc = PlayContext() pc.network_os = 'ios' conn = connection_loader.get('network_cli', pc, '/dev/null') conn.ssh = MagicMock() conn.receive = MagicMock() conn._connect() self.assertTrue(conn._terminal.on_open_shell.called) self.assertFalse(conn._terminal.on_become.called) 
conn._play_context.become = True conn._play_context.become_method = 'enable' conn._play_context.become_pass = 'password' conn._connected = False conn._connect() conn._terminal.on_become.assert_called_with(passwd='password') @patch("ansible.plugins.connection.paramiko_ssh.Connection.close") def test_network_cli_close(self, mocked_super): pc = PlayContext() pc.network_os = 'ios' conn = connection_loader.get('network_cli', pc, '/dev/null') terminal = MagicMock(supports_multiplexing=False) conn._terminal = terminal conn._ssh_shell = MagicMock() conn.paramiko_conn = MagicMock() conn._connected = True conn.close() self.assertTrue(terminal.on_close_shell.called) self.assertIsNone(conn._ssh_shell) self.assertIsNone(conn.paramiko_conn) @patch("ansible.plugins.connection.paramiko_ssh.Connection._connect") def test_network_cli_exec_command(self, mocked_super): pc = PlayContext() pc.network_os = 'ios' conn = connection_loader.get('network_cli', pc, '/dev/null') mock_send = MagicMock(return_value=b'command response') conn.send = mock_send conn._ssh_shell = MagicMock() # test sending a single command and converting to dict out = conn.exec_command('command') self.assertEqual(out, b'command response') mock_send.assert_called_with(command=b'command') # test sending a json string out = conn.exec_command(json.dumps({'command': 'command'})) self.assertEqual(out, b'command response') mock_send.assert_called_with(command=b'command') @patch("ansible.plugins.connection.network_cli.Connection._get_terminal_std_re") @patch("ansible.plugins.connection.network_cli.Connection._connect") def test_network_cli_send(self, mocked_connect, mocked_terminal_re): pc = PlayContext() pc.network_os = 'ios' conn = connection_loader.get('network_cli', pc, '/dev/null') mock__terminal = MagicMock() mocked_terminal_re.side_effect = [[re.compile(b'^ERROR')], [re.compile(b'device#')]] conn._terminal = mock__terminal mock__shell = MagicMock() conn._ssh_shell = mock__shell response = b"""device#command command 
response device# """ mock__shell.recv.side_effect = [response, None] output = conn.send(b'command') mock__shell.sendall.assert_called_with(b'command\r') self.assertEqual(to_text(conn._command_response), 'command response') mock__shell.reset_mock() mock__shell.recv.side_effect = [b"ERROR: error message device#"] mocked_terminal_re.side_effect = [[re.compile(b'^ERROR')], [re.compile(b'device#')]] with self.assertRaises(AnsibleConnectionFailure) as exc: conn.send(b'command') self.assertEqual(str(exc.exception), 'ERROR: error message device#')
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-execv-wrapper-shebang.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-sanity-requirements.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-venv-activation.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-venv-pythonpath.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
test/lib/ansible_test/_internal/delegation.py
"""Delegate test execution to another environment.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import re import sys import tempfile from . import types as t from .executor import ( SUPPORTED_PYTHON_VERSIONS, HTTPTESTER_HOSTS, create_shell_command, run_httptester, start_httptester, get_python_interpreter, get_python_version, get_docker_completion, get_remote_completion, ) from .config import ( TestConfig, EnvironmentConfig, IntegrationConfig, WindowsIntegrationConfig, NetworkIntegrationConfig, ShellConfig, SanityConfig, UnitsConfig, ) from .core_ci import ( AnsibleCoreCI, ) from .manage_ci import ( ManagePosixCI, ManageWindowsCI, ) from .util import ( ApplicationError, common_environment, pass_vars, display, ANSIBLE_BIN_PATH, ANSIBLE_TEST_DATA_ROOT, tempdir, ) from .util_common import ( run_command, ResultType, ) from .docker_util import ( docker_exec, docker_get, docker_pull, docker_put, docker_rm, docker_run, docker_available, docker_network_disconnect, get_docker_networks, ) from .cloud import ( get_cloud_providers, ) from .target import ( IntegrationTarget, ) from .data import ( data_context, ) from .payload import ( create_payload, ) from .venv import ( create_virtual_environment, ) def check_delegation_args(args): """ :type args: CommonConfig """ if not isinstance(args, EnvironmentConfig): return if args.docker: get_python_version(args, get_docker_completion(), args.docker_raw) elif args.remote: get_python_version(args, get_remote_completion(), args.remote) def delegate(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] :rtype: bool """ if isinstance(args, TestConfig): with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=data_context().content.root) as metadata_fd: args.metadata_path = os.path.basename(metadata_fd.name) 
args.metadata.to_file(args.metadata_path) try: return delegate_command(args, exclude, require, integration_targets) finally: args.metadata_path = None else: return delegate_command(args, exclude, require, integration_targets) def delegate_command(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] :rtype: bool """ if args.venv: delegate_venv(args, exclude, require, integration_targets) return True if args.tox: delegate_tox(args, exclude, require, integration_targets) return True if args.docker: delegate_docker(args, exclude, require, integration_targets) return True if args.remote: delegate_remote(args, exclude, require, integration_targets) return True return False def delegate_tox(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] """ if args.python: versions = (args.python_version,) if args.python_version not in SUPPORTED_PYTHON_VERSIONS: raise ApplicationError('tox does not support Python version %s' % args.python_version) else: versions = SUPPORTED_PYTHON_VERSIONS if args.httptester: needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases) if needs_httptester: display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester)) options = { '--tox': args.tox_args, '--tox-sitepackages': 0, } for version in versions: tox = ['tox', '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'tox.ini'), '-e', 'py' + version.replace('.', '')] if args.tox_sitepackages: tox.append('--sitepackages') tox.append('--') cmd = generate_command(args, None, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require) if not args.python: cmd += ['--python', version] # newer versions of tox do not support 
older python versions and will silently fall back to a different version # passing this option will allow the delegated ansible-test to verify it is running under the expected python version # tox 3.0.0 dropped official python 2.6 support: https://tox.readthedocs.io/en/latest/changelog.html#v3-0-0-2018-04-02 # tox 3.1.3 is the first version to support python 3.8 and later: https://tox.readthedocs.io/en/latest/changelog.html#v3-1-3-2018-08-03 # tox 3.1.3 appears to still work with python 2.6, making it a good version to use when supporting all python versions we use # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16 cmd += ['--check-python', version] if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: cmd += ['--coverage-label', 'tox-%s' % version] env = common_environment() # temporary solution to permit ansible-test delegated to tox to provision remote resources optional = ( 'SHIPPABLE', 'SHIPPABLE_BUILD_ID', 'SHIPPABLE_JOB_NUMBER', ) env.update(pass_vars(required=[], optional=optional)) run_command(args, tox + cmd, env=env) def delegate_venv(args, # type: EnvironmentConfig exclude, # type: t.List[str] require, # type: t.List[str] integration_targets, # type: t.Tuple[IntegrationTarget, ...] ): # type: (...) 
-> None """Delegate ansible-test execution to a virtual environment using venv or virtualenv.""" if args.python: versions = (args.python_version,) else: versions = SUPPORTED_PYTHON_VERSIONS if args.httptester: needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases) if needs_httptester: display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester)) venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s' % version)) for version in versions) venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path)) if not venvs: raise ApplicationError('No usable virtual environment support found.') options = { '--venv': 0, } with tempdir() as inject_path: for version, path in venvs.items(): os.symlink(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version)) python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version) cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require) if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: cmd += ['--coverage-label', 'venv'] env = common_environment() env.update( PATH=inject_path + os.pathsep + env['PATH'], ) run_command(args, cmd, env=env) def delegate_docker(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] """ test_image = args.docker privileged = args.docker_privileged if isinstance(args, ShellConfig): use_httptester = args.httptester else: use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets) if use_httptester: docker_pull(args, args.httptester) docker_pull(args, test_image) httptester_id = None test_id = 
None options = { '--docker': 1, '--docker-privileged': 0, '--docker-util': 1, } python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw) install_root = '/root/ansible' if data_context().content.collection: content_root = os.path.join(install_root, data_context().content.collection.directory) else: content_root = install_root remote_results_root = os.path.join(content_root, data_context().content.results_path) cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require) if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: image_label = args.docker_raw image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label) cmd += ['--coverage-label', 'docker-%s' % image_label] if isinstance(args, IntegrationConfig): if not args.allow_destructive: cmd.append('--allow-destructive') cmd_options = [] if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy): cmd_options.append('-it') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd: try: create_payload(args, local_source_fd.name) if use_httptester: httptester_id = run_httptester(args) else: httptester_id = None test_options = [ '--detach', '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro', '--privileged=%s' % str(privileged).lower(), ] if args.docker_memory: test_options.extend([ '--memory=%d' % args.docker_memory, '--memory-swap=%d' % args.docker_memory, ]) docker_socket = '/var/run/docker.sock' if args.docker_seccomp != 'default': test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp] if os.path.exists(docker_socket): test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)] if httptester_id: test_options += ['--env', 'HTTPTESTER=1'] for host in HTTPTESTER_HOSTS: test_options += ['--link', '%s:%s' % (httptester_id, host)] if isinstance(args, IntegrationConfig): cloud_platforms = get_cloud_providers(args) 
for cloud_platform in cloud_platforms: test_options += cloud_platform.get_docker_run_options() test_id = docker_run(args, test_image, options=test_options)[0] if args.explain: test_id = 'test_id' else: test_id = test_id.strip() # write temporary files to /root since /tmp isn't ready immediately on container start docker_put(args, test_id, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'), '/root/docker.sh') docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh']) docker_put(args, test_id, local_source_fd.name, '/root/ansible.tgz') docker_exec(args, test_id, ['mkdir', '/root/ansible']) docker_exec(args, test_id, ['tar', 'oxzf', '/root/ansible.tgz', '-C', '/root/ansible']) # docker images are only expected to have a single python version available if isinstance(args, UnitsConfig) and not args.python: cmd += ['--python', 'default'] # run unit tests unprivileged to prevent stray writes to the source tree # also disconnect from the network once requirements have been installed if isinstance(args, UnitsConfig): writable_dirs = [ os.path.join(content_root, ResultType.JUNIT.relative_path), os.path.join(content_root, ResultType.COVERAGE.relative_path), ] docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs) docker_exec(args, test_id, ['chmod', '777'] + writable_dirs) docker_exec(args, test_id, ['chmod', '755', '/root']) docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)]) docker_exec(args, test_id, ['useradd', 'pytest', '--create-home']) docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options) networks = get_docker_networks(args, test_id) for network in networks: docker_network_disconnect(args, test_id, network) cmd += ['--requirements-mode', 'skip'] cmd_options += ['--user', 'pytest'] try: docker_exec(args, test_id, cmd, options=cmd_options) finally: local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path)) remote_test_root = 
os.path.dirname(remote_results_root) remote_results_name = os.path.basename(remote_results_root) remote_temp_file = os.path.join('/root', remote_results_name + '.tgz') with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd: docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '-C', remote_test_root, remote_results_name]) docker_get(args, test_id, remote_temp_file, local_result_fd.name) run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root]) finally: if httptester_id: docker_rm(args, httptester_id) if test_id: docker_rm(args, test_id) def delegate_remote(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] """ parts = args.remote.split('/', 1) platform = parts[0] version = parts[1] core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider) success = False raw = False if isinstance(args, ShellConfig): use_httptester = args.httptester raw = args.raw else: use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets) if use_httptester and not docker_available(): display.warning('Assuming --disable-httptester since `docker` is not available.') use_httptester = False httptester_id = None ssh_options = [] content_root = None try: core_ci.start() if use_httptester: httptester_id, ssh_options = start_httptester(args) core_ci.wait() python_version = get_python_version(args, get_remote_completion(), args.remote) if platform == 'windows': # Windows doesn't need the ansible-test fluff, just run the SSH command manage = ManageWindowsCI(core_ci) manage.setup(python_version) cmd = ['powershell.exe'] elif raw: manage = ManagePosixCI(core_ci) manage.setup(python_version) cmd = create_shell_command(['bash']) else: manage = ManagePosixCI(core_ci) pwd = manage.setup(python_version) options = { 
'--remote': 1, } python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote) install_root = os.path.join(pwd, 'ansible') if data_context().content.collection: content_root = os.path.join(install_root, data_context().content.collection.directory) else: content_root = install_root cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require) if httptester_id: cmd += ['--inject-httptester'] if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)] if isinstance(args, IntegrationConfig): if not args.allow_destructive: cmd.append('--allow-destructive') # remote instances are only expected to have a single python version available if isinstance(args, UnitsConfig) and not args.python: cmd += ['--python', 'default'] if isinstance(args, IntegrationConfig): cloud_platforms = get_cloud_providers(args) for cloud_platform in cloud_platforms: ssh_options += cloud_platform.get_remote_ssh_options() try: manage.ssh(cmd, ssh_options) success = True finally: download = False if platform != 'windows': download = True if isinstance(args, ShellConfig): if args.raw: download = False if download and content_root: local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path)) remote_results_root = os.path.join(content_root, data_context().content.results_path) remote_results_name = os.path.basename(remote_results_root) remote_temp_path = os.path.join('/tmp', remote_results_name) manage.ssh('rm -rf {0} && cp -a {1} {0} && chmod -R a+r {0}'.format(remote_temp_path, remote_results_root)) manage.download(remote_temp_path, local_test_root) finally: if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success): core_ci.stop() if httptester_id: docker_rm(args, httptester_id) def generate_command(args, python_interpreter, ansible_bin_path, 
content_root, options, exclude, require): """ :type args: EnvironmentConfig :type python_interpreter: str | None :type ansible_bin_path: str :type content_root: str :type options: dict[str, int] :type exclude: list[str] :type require: list[str] :rtype: list[str] """ options['--color'] = 1 cmd = [os.path.join(ansible_bin_path, 'ansible-test')] if python_interpreter: cmd = [python_interpreter] + cmd # Force the encoding used during delegation. # This is only needed because ansible-test relies on Python's file system encoding. # Environments that do not have the locale configured are thus unable to work with unicode file paths. # Examples include FreeBSD and some Linux containers. env_vars = dict( LC_ALL='en_US.UTF-8', ANSIBLE_TEST_CONTENT_ROOT=content_root, ) env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)] cmd = ['/usr/bin/env'] + env_args + cmd cmd += list(filter_options(args, sys.argv[1:], options, exclude, require)) cmd += ['--color', 'yes' if args.color else 'no'] if args.requirements: cmd += ['--requirements'] if isinstance(args, ShellConfig): cmd = create_shell_command(cmd) elif isinstance(args, SanityConfig): if args.base_branch: cmd += ['--base-branch', args.base_branch] return cmd def filter_options(args, argv, options, exclude, require): """ :type args: EnvironmentConfig :type argv: list[str] :type options: dict[str, int] :type exclude: list[str] :type require: list[str] :rtype: collections.Iterable[str] """ options = options.copy() options['--requirements'] = 0 options['--truncate'] = 1 options['--redact'] = 0 if isinstance(args, TestConfig): options.update({ '--changed': 0, '--tracked': 0, '--untracked': 0, '--ignore-committed': 0, '--ignore-staged': 0, '--ignore-unstaged': 0, '--changed-from': 1, '--changed-path': 1, '--metadata': 1, '--exclude': 1, '--require': 1, }) elif isinstance(args, SanityConfig): options.update({ '--base-branch': 1, }) if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)): 
options.update({ '--inventory': 1, }) remaining = 0 for arg in argv: if not arg.startswith('-') and remaining: remaining -= 1 continue remaining = 0 parts = arg.split('=', 1) key = parts[0] if key in options: remaining = options[key] - len(parts) + 1 continue yield arg for arg in args.delegate_args: yield arg for target in exclude: yield '--exclude' yield target for target in require: yield '--require' yield target if isinstance(args, TestConfig): if args.metadata_path: yield '--metadata' yield args.metadata_path yield '--truncate' yield '%d' % args.truncate if args.redact: yield '--redact'
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
test/lib/ansible_test/_internal/sanity/__init__.py
"""Execute Ansible sanity tests.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import abc import glob import json import os import re import collections from .. import types as t from ..util import ( ApplicationError, SubprocessError, display, import_plugins, load_plugins, parse_to_list_of_dict, ABC, ANSIBLE_TEST_DATA_ROOT, is_binary_file, read_lines_without_comments, get_available_python_versions, find_python, is_subdir, paths_to_dirs, get_ansible_version, ) from ..util_common import ( run_command, handle_layout_messages, ) from ..ansible_util import ( ansible_environment, check_pyyaml, ) from ..target import ( walk_internal_targets, walk_sanity_targets, TestTarget, ) from ..executor import ( get_changes_filter, AllTargetsSkipped, Delegate, install_command_requirements, SUPPORTED_PYTHON_VERSIONS, ) from ..config import ( SanityConfig, ) from ..test import ( TestSuccess, TestFailure, TestSkipped, TestMessage, calculate_best_confidence, ) from ..data import ( data_context, ) COMMAND = 'sanity' SANITY_ROOT = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sanity') def command_sanity(args): """ :type args: SanityConfig """ handle_layout_messages(data_context().content.sanity_messages) changes = get_changes_filter(args) require = args.require + changes targets = SanityTargets.create(args.include, args.exclude, require) if not targets.include: raise AllTargetsSkipped() if args.delegate: raise Delegate(require=changes, exclude=args.exclude) install_command_requirements(args) tests = sanity_get_tests() if args.test: tests = [target for target in tests if target.name in args.test] else: disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled] tests = [target for target in tests if target.enabled or args.allow_disabled] if disabled: display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled))) if args.skip_test: tests = [target for target in tests if 
target.name not in args.skip_test] total = 0 failed = [] for test in tests: if args.list_tests: display.info(test.name) continue available_versions = sorted(get_available_python_versions(SUPPORTED_PYTHON_VERSIONS).keys()) if args.python: # specific version selected versions = (args.python,) elif isinstance(test, SanityMultipleVersion): # try all supported versions for multi-version tests when a specific version has not been selected versions = test.supported_python_versions elif not test.supported_python_versions or args.python_version in test.supported_python_versions: # the test works with any version or the version we're already running versions = (args.python_version,) else: # available versions supported by the test versions = tuple(sorted(set(available_versions) & set(test.supported_python_versions))) # use the lowest available version supported by the test or the current version as a fallback (which will be skipped) versions = versions[:1] or (args.python_version,) for version in versions: if isinstance(test, SanityMultipleVersion): skip_version = version else: skip_version = None options = '' if test.supported_python_versions and version not in test.supported_python_versions: display.warning("Skipping sanity test '%s' on unsupported Python %s." % (test.name, version)) result = SanitySkipped(test.name, skip_version) elif not args.python and version not in available_versions: display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." 
% (test.name, version)) result = SanitySkipped(test.name, skip_version) else: check_pyyaml(args, version) if test.supported_python_versions: display.info("Running sanity test '%s' with Python %s" % (test.name, version)) else: display.info("Running sanity test '%s'" % test.name) if isinstance(test, SanityCodeSmellTest): settings = test.load_processor(args) elif isinstance(test, SanityMultipleVersion): settings = test.load_processor(args, version) elif isinstance(test, SanitySingleVersion): settings = test.load_processor(args) elif isinstance(test, SanityVersionNeutral): settings = test.load_processor(args) else: raise Exception('Unsupported test type: %s' % type(test)) all_targets = targets.targets if test.all_targets: usable_targets = targets.targets elif test.no_targets: usable_targets = tuple() else: usable_targets = targets.include all_targets = SanityTargets.filter_and_inject_targets(test, all_targets) usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets) usable_targets = sorted(test.filter_targets(list(usable_targets))) usable_targets = settings.filter_skipped_targets(usable_targets) sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets)) if usable_targets or test.no_targets: if isinstance(test, SanityCodeSmellTest): result = test.test(args, sanity_targets, version) elif isinstance(test, SanityMultipleVersion): result = test.test(args, sanity_targets, version) options = ' --python %s' % version elif isinstance(test, SanitySingleVersion): result = test.test(args, sanity_targets, version) elif isinstance(test, SanityVersionNeutral): result = test.test(args, sanity_targets) else: raise Exception('Unsupported test type: %s' % type(test)) else: result = SanitySkipped(test.name, skip_version) result.write(args) total += 1 if isinstance(result, SanityFailure): failed.append(result.test + options) if failed: message = 'The %d sanity test(s) listed below (out of %d) failed. 
See error output above for details.\n%s' % ( len(failed), total, '\n'.join(failed)) if args.failure_ok: display.error(message) else: raise ApplicationError(message) def collect_code_smell_tests(): # type: () -> t.Tuple[SanityFunc, ...] """Return a tuple of available code smell sanity tests.""" paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py')) if data_context().content.is_ansible: # include Ansible specific code-smell tests which are not configured to be skipped ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell') skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True) paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests) paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p)) tests = tuple(SanityCodeSmellTest(p) for p in paths) return tests def sanity_get_tests(): """ :rtype: tuple[SanityFunc] """ return SANITY_TESTS class SanityIgnoreParser: """Parser for the consolidated sanity test ignore file.""" NO_CODE = '_' def __init__(self, args): # type: (SanityConfig) -> None if data_context().content.collection: ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2]) ansible_label = 'Ansible %s' % ansible_version file_name = 'ignore-%s.txt' % ansible_version else: ansible_label = 'Ansible' file_name = 'ignore.txt' self.args = args self.relative_path = os.path.join(data_context().content.sanity_path, file_name) self.path = os.path.join(data_context().content.root, self.relative_path) self.ignores = collections.defaultdict(lambda: collections.defaultdict(dict)) # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]] self.skips = collections.defaultdict(lambda: collections.defaultdict(int)) # type: t.Dict[str, t.Dict[str, int]] self.parse_errors = [] # type: t.List[t.Tuple[int, int, str]] self.file_not_found_errors = [] # 
type: t.List[t.Tuple[int, str]] lines = read_lines_without_comments(self.path, optional=True) targets = SanityTargets.get_targets() paths = set(target.path for target in targets) tests_by_name = {} # type: t.Dict[str, SanityTest] versioned_test_names = set() # type: t.Set[str] unversioned_test_names = {} # type: t.Dict[str, str] directories = paths_to_dirs(list(paths)) paths_by_test = {} # type: t.Dict[str, t.Set[str]] display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1) for test in sanity_get_tests(): test_targets = SanityTargets.filter_and_inject_targets(test, targets) paths_by_test[test.name] = set(target.path for target in test.filter_targets(test_targets)) if isinstance(test, SanityMultipleVersion): versioned_test_names.add(test.name) tests_by_name.update(dict(('%s-%s' % (test.name, python_version), test) for python_version in test.supported_python_versions)) else: unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS)) tests_by_name[test.name] = test for line_no, line in enumerate(lines, start=1): if not line: self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment")) continue parts = line.split(' ') path = parts[0] codes = parts[1:] if not path: self.parse_errors.append((line_no, 1, "Line cannot start with a space")) continue if path.endswith(os.path.sep): if path not in directories: self.file_not_found_errors.append((line_no, path)) continue else: if path not in paths: self.file_not_found_errors.append((line_no, path)) continue if not codes: self.parse_errors.append((line_no, len(path), "Error code required after path")) continue code = codes[0] if not code: self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty")) continue if len(codes) > 1: self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces")) 
continue parts = code.split('!') code = parts[0] commands = parts[1:] parts = code.split(':') test_name = parts[0] error_codes = parts[1:] test = tests_by_name.get(test_name) if not test: unversioned_name = unversioned_test_names.get(test_name) if unversioned_name: self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % ( unversioned_name, test_name))) elif test_name in versioned_test_names: self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % ( test_name, test_name, args.python_version))) else: self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name)) continue if path.endswith(os.path.sep) and not test.include_directories: self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name)) continue if path not in paths_by_test[test.name] and not test.no_targets: self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path))) continue if commands and error_codes: self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters")) continue if commands: command = commands[0] if len(commands) > 1: self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' 
characters")) continue if command == 'skip': if not test.can_skip: self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name)) continue existing_line_no = self.skips.get(test_name, {}).get(path) if existing_line_no: self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no))) continue self.skips[test_name][path] = line_no continue self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command)) continue if not test.can_ignore: self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name)) continue if test.error_code: if not error_codes: self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name)) continue error_code = error_codes[0] if len(error_codes) > 1: self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters")) continue else: if error_codes: self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name)) continue error_code = self.NO_CODE existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code) if existing: if test.error_code: self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % ( test_name, error_code, path, existing))) else: self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % ( test_name, path, existing))) continue self.ignores[test_name][path][error_code] = line_no @staticmethod def load(args): # type: (SanityConfig) -> SanityIgnoreParser """Return the current SanityIgnore instance, initializing it if needed.""" try: return SanityIgnoreParser.instance except AttributeError: pass SanityIgnoreParser.instance = 
SanityIgnoreParser(args) return SanityIgnoreParser.instance class SanityIgnoreProcessor: """Processor for sanity test ignores for a single run of one sanity test.""" def __init__(self, args, # type: SanityConfig test, # type: SanityTest python_version, # type: t.Optional[str] ): # type: (...) -> None name = test.name code = test.error_code if python_version: full_name = '%s-%s' % (name, python_version) else: full_name = name self.args = args self.test = test self.code = code self.parser = SanityIgnoreParser.load(args) self.ignore_entries = self.parser.ignores.get(full_name, {}) self.skip_entries = self.parser.skips.get(full_name, {}) self.used_line_numbers = set() # type: t.Set[int] def filter_skipped_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] """Return the given targets, with any skipped paths filtered out.""" return sorted(target for target in targets if target.path not in self.skip_entries) def process_errors(self, errors, paths): # type: (t.List[SanityMessage], t.List[str]) -> t.List[SanityMessage] """Return the given errors filtered for ignores and with any settings related errors included.""" errors = self.filter_messages(errors) errors.extend(self.get_errors(paths)) errors = sorted(set(errors)) return errors def filter_messages(self, messages): # type: (t.List[SanityMessage]) -> t.List[SanityMessage] """Return a filtered list of the given messages using the entries that have been loaded.""" filtered = [] for message in messages: path_entry = self.ignore_entries.get(message.path) if path_entry: code = message.code if self.code else SanityIgnoreParser.NO_CODE line_no = path_entry.get(code) if line_no: self.used_line_numbers.add(line_no) continue filtered.append(message) return filtered def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage] """Return error messages related to issues with the file.""" messages = [] # unused errors unused = [] # type: t.List[t.Tuple[int, str, str]] if self.test.no_targets or 
self.test.all_targets: # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked targets = SanityTargets.get_targets() test_targets = SanityTargets.filter_and_inject_targets(self.test, targets) paths = [target.path for target in test_targets] for path in paths: path_entry = self.ignore_entries.get(path) if not path_entry: continue unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers) messages.extend(SanityMessage( code=self.code, message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path, path=self.parser.relative_path, line=line, column=1, confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None, ) for line, path, code in unused) return messages class SanitySuccess(TestSuccess): """Sanity test success.""" def __init__(self, test, python_version=None): """ :type test: str :type python_version: str """ super(SanitySuccess, self).__init__(COMMAND, test, python_version) class SanitySkipped(TestSkipped): """Sanity test skipped.""" def __init__(self, test, python_version=None): """ :type test: str :type python_version: str """ super(SanitySkipped, self).__init__(COMMAND, test, python_version) class SanityFailure(TestFailure): """Sanity test failure.""" def __init__(self, test, python_version=None, messages=None, summary=None): """ :type test: str :type python_version: str :type messages: list[SanityMessage] :type summary: unicode """ super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary) class SanityMessage(TestMessage): """Single sanity test message for one file.""" class SanityTargets: """Sanity test target information.""" def __init__(self, targets, include): # type: (t.Tuple[TestTarget], t.Tuple[TestTarget]) -> None self.targets = targets self.include = include 
@staticmethod def create(include, exclude, require): # type: (t.List[str], t.List[str], t.List[str]) -> SanityTargets """Create a SanityTargets instance from the given include, exclude and require lists.""" _targets = SanityTargets.get_targets() _include = walk_internal_targets(_targets, include, exclude, require) return SanityTargets(_targets, _include) @staticmethod def filter_and_inject_targets(test, targets): # type: (SanityTest, t.Iterable[TestTarget]) -> t.List[TestTarget] """Filter and inject targets based on test requirements and the given target list.""" test_targets = list(targets) if not test.include_symlinks: # remove all symlinks unless supported by the test test_targets = [target for target in test_targets if not target.symlink] if not test.include_directories or not test.include_symlinks: # exclude symlinked directories unless supported by the test test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)] if test.include_directories: # include directories containing any of the included files test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets])) if not test.include_symlinks: # remove all directory symlinks unless supported by the test test_targets = [target for target in test_targets if not target.symlink] return test_targets @staticmethod def get_targets(): # type: () -> t.Tuple[TestTarget, ...] """Return a tuple of sanity test targets. 
Uses a cached version when available.""" try: return SanityTargets.get_targets.targets except AttributeError: SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets())) return SanityTargets.get_targets.targets class SanityTest(ABC): """Sanity test base class.""" __metaclass__ = abc.ABCMeta ansible_only = False def __init__(self, name): self.name = name self.enabled = True @property def error_code(self): # type: () -> t.Optional[str] """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" return None @property def can_ignore(self): # type: () -> bool """True if the test supports ignore entries.""" return True @property def can_skip(self): # type: () -> bool """True if the test supports skip entries.""" return not self.all_targets and not self.no_targets @property def all_targets(self): # type: () -> bool """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets.""" return False @property def no_targets(self): # type: () -> bool """True if the test does not use test targets. 
Mutually exclusive with all_targets.""" return False @property def include_directories(self): # type: () -> bool """True if the test targets should include directories.""" return False @property def include_symlinks(self): # type: () -> bool """True if the test targets should include symlinks.""" return False @property def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return tuple(python_version for python_version in SUPPORTED_PYTHON_VERSIONS if python_version.startswith('3.')) def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the test.""" if self.no_targets: return [] raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name) class SanityCodeSmellTest(SanityTest): """Sanity test script.""" def __init__(self, path): name = os.path.splitext(os.path.basename(path))[0] config_path = os.path.splitext(path)[0] + '.json' super(SanityCodeSmellTest, self).__init__(name) self.path = path self.config_path = config_path if os.path.exists(config_path) else None self.config = None if self.config_path: with open(self.config_path, 'r') as config_fd: self.config = json.load(config_fd) if self.config: self.enabled = not self.config.get('disabled') self.output = self.config.get('output') # type: t.Optional[str] self.extensions = self.config.get('extensions') # type: t.List[str] self.prefixes = self.config.get('prefixes') # type: t.List[str] self.files = self.config.get('files') # type: t.List[str] self.text = self.config.get('text') # type: t.Optional[bool] self.ignore_self = self.config.get('ignore_self') # type: bool self.__all_targets = self.config.get('all_targets') # type: bool self.__no_targets = self.config.get('no_targets') # type: bool 
self.__include_directories = self.config.get('include_directories') # type: bool self.__include_symlinks = self.config.get('include_symlinks') # type: bool else: self.output = None self.extensions = [] self.prefixes = [] self.files = [] self.text = None # type: t.Optional[bool] self.ignore_self = False self.__all_targets = False self.__no_targets = True self.__include_directories = False self.__include_symlinks = False if self.no_targets: mutually_exclusive = ( 'extensions', 'prefixes', 'files', 'text', 'ignore_self', 'all_targets', 'include_directories', 'include_symlinks', ) problems = sorted(name for name in mutually_exclusive if getattr(self, name)) if problems: raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems))) @property def all_targets(self): # type: () -> bool """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets.""" return self.__all_targets @property def no_targets(self): # type: () -> bool """True if the test does not use test targets. 
Mutually exclusive with all_targets.""" return self.__no_targets @property def include_directories(self): # type: () -> bool """True if the test targets should include directories.""" return self.__include_directories @property def include_symlinks(self): # type: () -> bool """True if the test targets should include symlinks.""" return self.__include_symlinks def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the test.""" if self.no_targets: return [] if self.text is not None: if self.text: targets = [target for target in targets if not is_binary_file(target.path)] else: targets = [target for target in targets if is_binary_file(target.path)] if self.extensions: targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions or (is_subdir(target.path, 'bin') and '.py' in self.extensions)] if self.prefixes: targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)] if self.files: targets = [target for target in targets if os.path.basename(target.path) in self.files] if self.ignore_self and data_context().content.is_ansible: relative_self_path = os.path.relpath(self.path, data_context().content.root) targets = [target for target in targets if target.path != relative_self_path] return targets def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ cmd = [find_python(python_version), self.path] env = ansible_environment(args, color=False) pattern = None data = None settings = self.load_processor(args) paths = [target.path for target in targets.include] if self.config: if self.output == 'path-line-column-message': pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$' elif self.output == 'path-message': pattern = '^(?P<path>[^:]*): (?P<message>.*)$' else: 
pattern = ApplicationError('Unsupported output type: %s' % self.output) if not self.no_targets: data = '\n'.join(paths) if data: display.info(data, verbosity=4) try: stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True) status = 0 except SubprocessError as ex: stdout = ex.stdout stderr = ex.stderr status = ex.status if args.explain: return SanitySuccess(self.name) if stdout and not stderr: if pattern: matches = parse_to_list_of_dict(pattern, stdout) messages = [SanityMessage( message=m['message'], path=m['path'], line=int(m.get('line', 0)), column=int(m.get('column', 0)), ) for m in matches] messages = settings.process_errors(messages, paths) if not messages: return SanitySuccess(self.name) return SanityFailure(self.name, messages=messages) if stderr or status: summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) return SanityFailure(self.name, summary=summary) messages = settings.process_errors([], paths) if messages: return SanityFailure(self.name, messages=messages) return SanitySuccess(self.name) def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return SanityIgnoreProcessor(args, self, None) class SanityFunc(SanityTest): """Base class for sanity test plugins.""" def __init__(self): name = self.__class__.__name__ name = re.sub(r'Test$', '', name) # drop Test suffix name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization super(SanityFunc, self).__init__(name) class SanityVersionNeutral(SanityFunc): """Base class for sanity test plugins which are idependent of the python version being used.""" @abc.abstractmethod def test(self, args, targets): """ :type args: SanityConfig :type targets: SanityTargets :rtype: TestResult """ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return 
SanityIgnoreProcessor(args, self, None) @property def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return None class SanitySingleVersion(SanityFunc): """Base class for sanity test plugins which should run on a single python version.""" @abc.abstractmethod def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return SanityIgnoreProcessor(args, self, None) class SanityMultipleVersion(SanityFunc): """Base class for sanity test plugins which should run on multiple python versions.""" @abc.abstractmethod def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ def load_processor(self, args, python_version): # type: (SanityConfig, str) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return SanityIgnoreProcessor(args, self, python_version) @property def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return SUPPORTED_PYTHON_VERSIONS SANITY_TESTS = ( ) def sanity_init(): """Initialize full sanity test list (includes code-smell scripts determined at runtime).""" import_plugins('sanity') sanity_plugins = {} # type: t.Dict[str, t.Type[SanityFunc]] load_plugins(SanityFunc, sanity_plugins) sanity_tests = tuple([plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only]) global SANITY_TESTS # pylint: disable=locally-disabled, global-statement SANITY_TESTS = tuple(sorted(sanity_tests + 
collect_code_smell_tests(), key=lambda k: k.name))
closed
ansible/ansible
https://github.com/ansible/ansible
61,951
ansible-test --venv attempts to install to system locations
##### SUMMARY After installing ansible-2.90beta1 via pip, ansible-test --venv tries to install to system locations, getting permission denied ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> * I installed this on CENTOS7 using the python3.6 package * Installed 2.9.0beta1 from pypi ##### STEPS TO REPRODUCE ``` $ python3.6 -m pip install --user ansible==2.9.0beta1 $ ansible --version ansible 2.9.0b1 config file = None configured module search path = ['/home/badger/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/badger/.local/lib/python3.6/site-packages/ansible executable location = /home/badger/.local/bin/ansible python version = 3.6.8 (default, Apr 25 2019, 21:02:35) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py ``` ##### EXPECTED RESULTS ansible-test installs a venv with the required packages for the sanity test and then runs the test on lib/ansible/module_utils/basic.py ##### ACTUAL RESULTS ``` Ignoring coverage: markers "python_version > '3.7'" don't match your environment Ignoring cryptography: markers "python_version < '2.7'" don't match your environment Ignoring deepdiff: markers "python_version < '3'" don't match your environment Ignoring urllib3: markers "python_version < '2.7'" don't match your environment Ignoring sphinx: markers "python_version < '2.7'" don't match your environment Ignoring wheel: markers "python_version < '2.7'" don't match your environment Ignoring yamllint: markers "python_version < '2.7'" don't match your environment 
Ignoring paramiko: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version < '2.7'" don't match your environment Ignoring pytest: markers "python_version == '2.7'" don't match your environment Ignoring pytest-forked: markers "python_version < '2.7'" don't match your environment Ignoring requests: markers "python_version < '2.7'" don't match your environment Ignoring virtualenv: markers "python_version < '2.7'" don't match your environment Ignoring pyopenssl: markers "python_version < '2.7'" don't match your environment Ignoring pyyaml: markers "python_version < '2.7'" don't match your environment Ignoring pycparser: markers "python_version < '2.7'" don't match your environment Ignoring xmltodict: markers "python_version < '2.7'" don't match your environment Ignoring lxml: markers "python_version < '2.7'" don't match your environment Ignoring pyvmomi: markers "python_version < '2.7'" don't match your environment Collecting voluptuous>=0.11.0 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 25)) Using cached https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz Collecting pylint==2.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 45)) Using cached https://files.pythonhosted.org/packages/60/c2/b3f73f4ac008bef6e75bca4992f3963b3f85942e0277237721ef1c151f0d/pylint-2.3.1-py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): cryptography in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): jinja2 in /home/badger/.local/lib/python3.6/site-packages (from -r 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Collecting pycodestyle (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 3)) Using cached https://files.pythonhosted.org/packages/0e/0c/04a353e104d2f324f8ee5f4b32012618c1c86dd79e52a433b64fceed511b/pycodestyle-2.5.0-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): pyyaml in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 5)) Requirement already satisfied (use --upgrade to upgrade): rstcheck in /home/badger/.local/lib/python3.6/site-packages (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting virtualenv (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 7)) Using cached https://files.pythonhosted.org/packages/8b/12/8d4f45b8962b03ac9efefe5ed5053f6b29334d83e438b4fe379d21c0cb8e/virtualenv-16.7.5-py2.py3-none-any.whl Collecting yamllint (from -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/39/b5/390c956b1aad9a0de18cffa94dba8610b9eca4bd142aa56746e31a388f14/yamllint-1.17.0-py2.py3-none-any.whl Collecting astroid==2.2.5 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 41)) Using cached https://files.pythonhosted.org/packages/d5/ad/7221a62a2dbce5c3b8c57fd18e1052c7331adc19b3f27f1561aa6e620db2/astroid-2.2.5-py3-none-any.whl Collecting mccabe==0.6.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 44)) Using cached 
https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Collecting isort==4.3.15 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 42)) Using cached https://files.pythonhosted.org/packages/b6/89/3137d13dd30a0d063435661950f6dfd50957532989e49aef652f490ef616/isort-4.3.15-py2.py3-none-any.whl Requirement already satisfied (use --upgrade to upgrade): asn1crypto>=0.21.0 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): six>=1.4.1 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): cffi!=1.11.3,>=1.8 in /home/badger/.local/lib/python3.6/site-packages (from cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Requirement already satisfied (use --upgrade to upgrade): MarkupSafe>=0.23 in /home/badger/.local/lib/python3.6/site-packages (from jinja2->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 2)) Requirement already satisfied (use --upgrade to upgrade): docutils>=0.7 in /home/badger/.local/lib/python3.6/site-packages (from rstcheck->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 6)) Collecting pathspec>=0.5.3 (from yamllint->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 9)) Using cached https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz Collecting typed-ast==1.4.0 (from -c 
/home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 46)) Using cached https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl Collecting wrapt==1.11.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 47)) Using cached https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Collecting lazy-object-proxy==1.3.1 (from -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt (line 43)) Using cached https://files.pythonhosted.org/packages/65/1f/2043ec33066e779905ed7e6580384425fdc7dc2ac64d6931060c75b0c5a3/lazy_object_proxy-1.3.1-cp36-cp36m-manylinux1_x86_64.whl Requirement already satisfied (use --upgrade to upgrade): pycparser in /home/badger/.local/lib/python3.6/site-packages (from cffi!=1.11.3,>=1.8->cryptography->-r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt (line 1)) Installing collected packages: voluptuous, typed-ast, wrapt, lazy-object-proxy, astroid, isort, mccabe, pylint, pycodestyle, virtualenv, pathspec, yamllint Running setup.py install for voluptuous ... 
error Complete output from command /var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile: running install running build running build_py creating build creating build/lib creating build/lib/voluptuous copying voluptuous/__init__.py -> build/lib/voluptuous copying voluptuous/error.py -> build/lib/voluptuous copying voluptuous/humanize.py -> build/lib/voluptuous copying voluptuous/schema_builder.py -> build/lib/voluptuous copying voluptuous/util.py -> build/lib/voluptuous copying voluptuous/validators.py -> build/lib/voluptuous running install_lib creating /usr/local/lib/python3.6 error: could not create '/usr/local/lib/python3.6': Permission denied ---------------------------------------- Command "/var/tmp/tmpi3kn3062/python3.6 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-n_ufdj0i/voluptuous/setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record /tmp/pip-nzpu8u3y-record/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /tmp/pip-build-n_ufdj0i/voluptuous/ ERROR: Command "/var/tmp/tmpi3kn3062/python3.6 -m pip.__main__ install --disable-pip-version-check -c /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/constraints.txt -r /home/badger/.local/lib/python3.6/site-packages/ansible_test/_data/requirements/sanity.txt" returned exit status 1. 
ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpi3kn3062/python3.6 /home/badger/.local/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-c4as9ur5.json --truncate 117 --color yes --requirements" returned exit status 1. ```
https://github.com/ansible/ansible/issues/61951
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T18:15:00Z
python
2019-09-10T01:32:29Z
test/lib/ansible_test/_internal/util_common.py
"""Common utility code that depends on CommonConfig.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import atexit import contextlib import json import os import shutil import tempfile import textwrap from . import types as t from .util import ( common_environment, COVERAGE_CONFIG_NAME, display, find_python, is_shippable, MODE_DIRECTORY, MODE_FILE_EXECUTE, PYTHON_PATHS, raw_command, to_bytes, ANSIBLE_TEST_DATA_ROOT, make_dirs, ApplicationError, ) from .data import ( data_context, ) from .provider.layout import ( LayoutMessages, ) class ResultType: """Test result type.""" BOT = None # type: ResultType COVERAGE = None # type: ResultType DATA = None # type: ResultType JUNIT = None # type: ResultType LOGS = None # type: ResultType REPORTS = None # type: ResultType TMP = None # type: ResultType @staticmethod def _populate(): ResultType.BOT = ResultType('bot') ResultType.COVERAGE = ResultType('coverage') ResultType.DATA = ResultType('data') ResultType.JUNIT = ResultType('junit') ResultType.LOGS = ResultType('logs') ResultType.REPORTS = ResultType('reports') ResultType.TMP = ResultType('.tmp') def __init__(self, name): # type: (str) -> None self.name = name @property def relative_path(self): # type: () -> str """The content relative path to the results.""" return os.path.join(data_context().content.results_path, self.name) @property def path(self): # type: () -> str """The absolute path to the results.""" return os.path.join(data_context().content.root, self.relative_path) def __str__(self): # type: () -> str return self.name # noinspection PyProtectedMember ResultType._populate() # pylint: disable=protected-access class CommonConfig: """Configuration common to all commands.""" def __init__(self, args, command): """ :type args: any :type command: str """ self.command = command self.color = args.color # type: bool self.explain = args.explain # type: bool self.verbosity = args.verbosity # type: int self.debug = args.debug # type: 
bool self.truncate = args.truncate # type: int self.redact = args.redact # type: bool if is_shippable(): self.redact = True self.cache = {} def get_ansible_config(self): # type: () -> str """Return the path to the Ansible config for the given config.""" return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg') def handle_layout_messages(messages): # type: (t.Optional[LayoutMessages]) -> None """Display the given layout messages.""" if not messages: return for message in messages.info: display.info(message, verbosity=1) for message in messages.warning: display.warning(message) if messages.error: raise ApplicationError('\n'.join(messages.error)) @contextlib.contextmanager def named_temporary_file(args, prefix, suffix, directory, content): """ :param args: CommonConfig :param prefix: str :param suffix: str :param directory: str :param content: str | bytes | unicode :rtype: str """ if args.explain: yield os.path.join(directory, '%stemp%s' % (prefix, suffix)) else: with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd: tempfile_fd.write(to_bytes(content)) tempfile_fd.flush() yield tempfile_fd.name def write_json_test_results(category, name, content): # type: (ResultType, str, t.Union[t.List[t.Any], t.Dict[str, t.Any]]) -> None """Write the given json content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) write_json_file(path, content, create_directories=True) def write_text_test_results(category, name, content): # type: (ResultType, str, str) -> None """Write the given text content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) write_text_file(path, content, create_directories=True) def write_json_file(path, content, create_directories=False): # type: (str, t.Union[t.List[t.Any], t.Dict[str, t.Any]], bool) -> None """Write the given json content to the specified path, optionally creating missing 
directories.""" text_content = json.dumps(content, sort_keys=True, indent=4) + '\n' write_text_file(path, text_content, create_directories=create_directories) def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None """Write the given text content to the specified path, optionally creating missing directories.""" if create_directories: make_dirs(os.path.dirname(path)) with open(to_bytes(path), 'wb') as file: file.write(to_bytes(content)) def get_python_path(args, interpreter): """ :type args: TestConfig :type interpreter: str :rtype: str """ # When the python interpreter is already named "python" its directory can simply be added to the path. # Using another level of indirection is only required when the interpreter has a different name. if os.path.basename(interpreter) == 'python': return os.path.dirname(interpreter) python_path = PYTHON_PATHS.get(interpreter) if python_path: return python_path prefix = 'python-' suffix = '-ansible' root_temp_dir = '/tmp' if args.explain: return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix))) python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir) injected_interpreter = os.path.join(python_path, 'python') # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments. # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter. use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter) if use_symlink: display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1) os.symlink(interpreter, injected_interpreter) else: display.info('Injecting "%s" as a execv wrapper for the "%s" interpreter.' 
% (injected_interpreter, interpreter), verbosity=1) code = textwrap.dedent(''' #!%s from __future__ import absolute_import from os import execv from sys import argv python = '%s' execv(python, [python] + argv[1:]) ''' % (interpreter, interpreter)).lstrip() write_text_file(injected_interpreter, code) os.chmod(injected_interpreter, MODE_FILE_EXECUTE) os.chmod(python_path, MODE_DIRECTORY) if not PYTHON_PATHS: atexit.register(cleanup_python_paths) PYTHON_PATHS[interpreter] = python_path return python_path def cleanup_python_paths(): """Clean up all temporary python directories.""" for path in sorted(PYTHON_PATHS.values()): display.info('Cleaning up temporary python directory: %s' % path, verbosity=2) shutil.rmtree(path) def get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=None): """ :type args: TestConfig :type target_name: str :type version: str :type temp_path: str :type module_coverage: bool :type remote_temp_path: str | None :rtype: dict[str, str] """ if temp_path: # integration tests (both localhost and the optional testhost) # config and results are in a temporary directory coverage_config_base_path = temp_path coverage_output_base_path = temp_path elif args.coverage_config_base_path: # unit tests, sanity tests and other special cases (localhost only) # config is in a temporary directory # results are in the source tree coverage_config_base_path = args.coverage_config_base_path coverage_output_base_path = os.path.join(data_context().content.root, data_context().content.results_path) else: raise Exception('No temp path and no coverage config base path. 
Check for missing coverage_context usage.') config_file = os.path.join(coverage_config_base_path, COVERAGE_CONFIG_NAME) coverage_file = os.path.join(coverage_output_base_path, ResultType.COVERAGE.name, '%s=%s=%s=%s=coverage' % ( args.command, target_name, args.coverage_label or 'local-%s' % version, 'python-%s' % version)) if not args.explain and not os.path.exists(config_file): raise Exception('Missing coverage config file: %s' % config_file) if args.coverage_check: # cause the 'coverage' module to be found, but not imported or enabled coverage_file = '' # Enable code coverage collection on local Python programs (this does not include Ansible modules). # Used by the injectors to support code coverage. # Used by the pytest unit test plugin to support code coverage. # The COVERAGE_FILE variable is also used directly by the 'coverage' module. env = dict( COVERAGE_CONF=config_file, COVERAGE_FILE=coverage_file, ) if module_coverage: # Enable code coverage collection on Ansible modules (both local and remote). # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage. env.update(dict( _ANSIBLE_COVERAGE_CONFIG=config_file, _ANSIBLE_COVERAGE_OUTPUT=coverage_file, )) if remote_temp_path: # Include the command, target and label so the remote host can create a filename with that info. 
The remote # is responsible for adding '={language version}=coverage.{hostname}.{pid}.{id}' env['_ANSIBLE_COVERAGE_REMOTE_OUTPUT'] = os.path.join(remote_temp_path, '%s=%s=%s' % ( args.command, target_name, args.coverage_label or 'remote')) env['_ANSIBLE_COVERAGE_REMOTE_WHITELIST'] = os.path.join(data_context().content.root, '*') return env def intercept_command(args, cmd, target_name, env, capture=False, data=None, cwd=None, python_version=None, temp_path=None, module_coverage=True, virtualenv=None, disable_coverage=False, remote_temp_path=None): """ :type args: TestConfig :type cmd: collections.Iterable[str] :type target_name: str :type env: dict[str, str] :type capture: bool :type data: str | None :type cwd: str | None :type python_version: str | None :type temp_path: str | None :type module_coverage: bool :type virtualenv: str | None :type disable_coverage: bool :type remote_temp_path: str | None :rtype: str | None, str | None """ if not env: env = common_environment() cmd = list(cmd) version = python_version or args.python_version interpreter = virtualenv or find_python(version) inject_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector') if not virtualenv: # injection of python into the path is required when not activating a virtualenv # otherwise scripts may find the wrong interpreter or possibly no interpreter python_path = get_python_path(args, interpreter) inject_path = python_path + os.path.pathsep + inject_path env['PATH'] = inject_path + os.path.pathsep + env['PATH'] env['ANSIBLE_TEST_PYTHON_VERSION'] = version env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = interpreter if args.coverage and not disable_coverage: # add the necessary environment variables to enable code coverage collection env.update(get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=remote_temp_path)) return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd) def run_command(args, cmd, capture=False, env=None, data=None, 
cwd=None, always=False, stdin=None, stdout=None, cmd_verbosity=1, str_errors='strict'): """ :type args: CommonConfig :type cmd: collections.Iterable[str] :type capture: bool :type env: dict[str, str] | None :type data: str | None :type cwd: str | None :type always: bool :type stdin: file | None :type stdout: file | None :type cmd_verbosity: int :type str_errors: str :rtype: str | None, str | None """ explain = args.explain and not always return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, cmd_verbosity=cmd_verbosity, str_errors=str_errors)
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-execv-wrapper-shebang.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-sanity-requirements.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-venv-activation.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
changelogs/fragments/ansible-test-venv-pythonpath.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
test/lib/ansible_test/_internal/delegation.py
"""Delegate test execution to another environment.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import re import sys import tempfile from . import types as t from .executor import ( SUPPORTED_PYTHON_VERSIONS, HTTPTESTER_HOSTS, create_shell_command, run_httptester, start_httptester, get_python_interpreter, get_python_version, get_docker_completion, get_remote_completion, ) from .config import ( TestConfig, EnvironmentConfig, IntegrationConfig, WindowsIntegrationConfig, NetworkIntegrationConfig, ShellConfig, SanityConfig, UnitsConfig, ) from .core_ci import ( AnsibleCoreCI, ) from .manage_ci import ( ManagePosixCI, ManageWindowsCI, ) from .util import ( ApplicationError, common_environment, pass_vars, display, ANSIBLE_BIN_PATH, ANSIBLE_TEST_DATA_ROOT, tempdir, ) from .util_common import ( run_command, ResultType, ) from .docker_util import ( docker_exec, docker_get, docker_pull, docker_put, docker_rm, docker_run, docker_available, docker_network_disconnect, get_docker_networks, ) from .cloud import ( get_cloud_providers, ) from .target import ( IntegrationTarget, ) from .data import ( data_context, ) from .payload import ( create_payload, ) from .venv import ( create_virtual_environment, ) def check_delegation_args(args): """ :type args: CommonConfig """ if not isinstance(args, EnvironmentConfig): return if args.docker: get_python_version(args, get_docker_completion(), args.docker_raw) elif args.remote: get_python_version(args, get_remote_completion(), args.remote) def delegate(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] :rtype: bool """ if isinstance(args, TestConfig): with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=data_context().content.root) as metadata_fd: args.metadata_path = os.path.basename(metadata_fd.name) 
args.metadata.to_file(args.metadata_path) try: return delegate_command(args, exclude, require, integration_targets) finally: args.metadata_path = None else: return delegate_command(args, exclude, require, integration_targets) def delegate_command(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] :rtype: bool """ if args.venv: delegate_venv(args, exclude, require, integration_targets) return True if args.tox: delegate_tox(args, exclude, require, integration_targets) return True if args.docker: delegate_docker(args, exclude, require, integration_targets) return True if args.remote: delegate_remote(args, exclude, require, integration_targets) return True return False def delegate_tox(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] """ if args.python: versions = (args.python_version,) if args.python_version not in SUPPORTED_PYTHON_VERSIONS: raise ApplicationError('tox does not support Python version %s' % args.python_version) else: versions = SUPPORTED_PYTHON_VERSIONS if args.httptester: needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases) if needs_httptester: display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester)) options = { '--tox': args.tox_args, '--tox-sitepackages': 0, } for version in versions: tox = ['tox', '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'tox.ini'), '-e', 'py' + version.replace('.', '')] if args.tox_sitepackages: tox.append('--sitepackages') tox.append('--') cmd = generate_command(args, None, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require) if not args.python: cmd += ['--python', version] # newer versions of tox do not support 
older python versions and will silently fall back to a different version # passing this option will allow the delegated ansible-test to verify it is running under the expected python version # tox 3.0.0 dropped official python 2.6 support: https://tox.readthedocs.io/en/latest/changelog.html#v3-0-0-2018-04-02 # tox 3.1.3 is the first version to support python 3.8 and later: https://tox.readthedocs.io/en/latest/changelog.html#v3-1-3-2018-08-03 # tox 3.1.3 appears to still work with python 2.6, making it a good version to use when supporting all python versions we use # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16 cmd += ['--check-python', version] if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: cmd += ['--coverage-label', 'tox-%s' % version] env = common_environment() # temporary solution to permit ansible-test delegated to tox to provision remote resources optional = ( 'SHIPPABLE', 'SHIPPABLE_BUILD_ID', 'SHIPPABLE_JOB_NUMBER', ) env.update(pass_vars(required=[], optional=optional)) run_command(args, tox + cmd, env=env) def delegate_venv(args, # type: EnvironmentConfig exclude, # type: t.List[str] require, # type: t.List[str] integration_targets, # type: t.Tuple[IntegrationTarget, ...] ): # type: (...) 
-> None """Delegate ansible-test execution to a virtual environment using venv or virtualenv.""" if args.python: versions = (args.python_version,) else: versions = SUPPORTED_PYTHON_VERSIONS if args.httptester: needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases) if needs_httptester: display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester)) venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s' % version)) for version in versions) venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path)) if not venvs: raise ApplicationError('No usable virtual environment support found.') options = { '--venv': 0, } with tempdir() as inject_path: for version, path in venvs.items(): os.symlink(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version)) python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version) cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require) if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: cmd += ['--coverage-label', 'venv'] env = common_environment() env.update( PATH=inject_path + os.pathsep + env['PATH'], ) run_command(args, cmd, env=env) def delegate_docker(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] """ test_image = args.docker privileged = args.docker_privileged if isinstance(args, ShellConfig): use_httptester = args.httptester else: use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets) if use_httptester: docker_pull(args, args.httptester) docker_pull(args, test_image) httptester_id = None test_id = 
None options = { '--docker': 1, '--docker-privileged': 0, '--docker-util': 1, } python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw) install_root = '/root/ansible' if data_context().content.collection: content_root = os.path.join(install_root, data_context().content.collection.directory) else: content_root = install_root remote_results_root = os.path.join(content_root, data_context().content.results_path) cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require) if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: image_label = args.docker_raw image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label) cmd += ['--coverage-label', 'docker-%s' % image_label] if isinstance(args, IntegrationConfig): if not args.allow_destructive: cmd.append('--allow-destructive') cmd_options = [] if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy): cmd_options.append('-it') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd: try: create_payload(args, local_source_fd.name) if use_httptester: httptester_id = run_httptester(args) else: httptester_id = None test_options = [ '--detach', '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro', '--privileged=%s' % str(privileged).lower(), ] if args.docker_memory: test_options.extend([ '--memory=%d' % args.docker_memory, '--memory-swap=%d' % args.docker_memory, ]) docker_socket = '/var/run/docker.sock' if args.docker_seccomp != 'default': test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp] if os.path.exists(docker_socket): test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)] if httptester_id: test_options += ['--env', 'HTTPTESTER=1'] for host in HTTPTESTER_HOSTS: test_options += ['--link', '%s:%s' % (httptester_id, host)] if isinstance(args, IntegrationConfig): cloud_platforms = get_cloud_providers(args) 
for cloud_platform in cloud_platforms: test_options += cloud_platform.get_docker_run_options() test_id = docker_run(args, test_image, options=test_options)[0] if args.explain: test_id = 'test_id' else: test_id = test_id.strip() # write temporary files to /root since /tmp isn't ready immediately on container start docker_put(args, test_id, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'), '/root/docker.sh') docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh']) docker_put(args, test_id, local_source_fd.name, '/root/ansible.tgz') docker_exec(args, test_id, ['mkdir', '/root/ansible']) docker_exec(args, test_id, ['tar', 'oxzf', '/root/ansible.tgz', '-C', '/root/ansible']) # docker images are only expected to have a single python version available if isinstance(args, UnitsConfig) and not args.python: cmd += ['--python', 'default'] # run unit tests unprivileged to prevent stray writes to the source tree # also disconnect from the network once requirements have been installed if isinstance(args, UnitsConfig): writable_dirs = [ os.path.join(content_root, ResultType.JUNIT.relative_path), os.path.join(content_root, ResultType.COVERAGE.relative_path), ] docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs) docker_exec(args, test_id, ['chmod', '777'] + writable_dirs) docker_exec(args, test_id, ['chmod', '755', '/root']) docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)]) docker_exec(args, test_id, ['useradd', 'pytest', '--create-home']) docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options) networks = get_docker_networks(args, test_id) for network in networks: docker_network_disconnect(args, test_id, network) cmd += ['--requirements-mode', 'skip'] cmd_options += ['--user', 'pytest'] try: docker_exec(args, test_id, cmd, options=cmd_options) finally: local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path)) remote_test_root = 
os.path.dirname(remote_results_root) remote_results_name = os.path.basename(remote_results_root) remote_temp_file = os.path.join('/root', remote_results_name + '.tgz') with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd: docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '-C', remote_test_root, remote_results_name]) docker_get(args, test_id, remote_temp_file, local_result_fd.name) run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root]) finally: if httptester_id: docker_rm(args, httptester_id) if test_id: docker_rm(args, test_id) def delegate_remote(args, exclude, require, integration_targets): """ :type args: EnvironmentConfig :type exclude: list[str] :type require: list[str] :type integration_targets: tuple[IntegrationTarget] """ parts = args.remote.split('/', 1) platform = parts[0] version = parts[1] core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider) success = False raw = False if isinstance(args, ShellConfig): use_httptester = args.httptester raw = args.raw else: use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets) if use_httptester and not docker_available(): display.warning('Assuming --disable-httptester since `docker` is not available.') use_httptester = False httptester_id = None ssh_options = [] content_root = None try: core_ci.start() if use_httptester: httptester_id, ssh_options = start_httptester(args) core_ci.wait() python_version = get_python_version(args, get_remote_completion(), args.remote) if platform == 'windows': # Windows doesn't need the ansible-test fluff, just run the SSH command manage = ManageWindowsCI(core_ci) manage.setup(python_version) cmd = ['powershell.exe'] elif raw: manage = ManagePosixCI(core_ci) manage.setup(python_version) cmd = create_shell_command(['bash']) else: manage = ManagePosixCI(core_ci) pwd = manage.setup(python_version) options = { 
'--remote': 1, } python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote) install_root = os.path.join(pwd, 'ansible') if data_context().content.collection: content_root = os.path.join(install_root, data_context().content.collection.directory) else: content_root = install_root cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require) if httptester_id: cmd += ['--inject-httptester'] if isinstance(args, TestConfig): if args.coverage and not args.coverage_label: cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)] if isinstance(args, IntegrationConfig): if not args.allow_destructive: cmd.append('--allow-destructive') # remote instances are only expected to have a single python version available if isinstance(args, UnitsConfig) and not args.python: cmd += ['--python', 'default'] if isinstance(args, IntegrationConfig): cloud_platforms = get_cloud_providers(args) for cloud_platform in cloud_platforms: ssh_options += cloud_platform.get_remote_ssh_options() try: manage.ssh(cmd, ssh_options) success = True finally: download = False if platform != 'windows': download = True if isinstance(args, ShellConfig): if args.raw: download = False if download and content_root: local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path)) remote_results_root = os.path.join(content_root, data_context().content.results_path) remote_results_name = os.path.basename(remote_results_root) remote_temp_path = os.path.join('/tmp', remote_results_name) manage.ssh('rm -rf {0} && cp -a {1} {0} && chmod -R a+r {0}'.format(remote_temp_path, remote_results_root)) manage.download(remote_temp_path, local_test_root) finally: if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success): core_ci.stop() if httptester_id: docker_rm(args, httptester_id) def generate_command(args, python_interpreter, ansible_bin_path, 
content_root, options, exclude, require): """ :type args: EnvironmentConfig :type python_interpreter: str | None :type ansible_bin_path: str :type content_root: str :type options: dict[str, int] :type exclude: list[str] :type require: list[str] :rtype: list[str] """ options['--color'] = 1 cmd = [os.path.join(ansible_bin_path, 'ansible-test')] if python_interpreter: cmd = [python_interpreter] + cmd # Force the encoding used during delegation. # This is only needed because ansible-test relies on Python's file system encoding. # Environments that do not have the locale configured are thus unable to work with unicode file paths. # Examples include FreeBSD and some Linux containers. env_vars = dict( LC_ALL='en_US.UTF-8', ANSIBLE_TEST_CONTENT_ROOT=content_root, ) env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)] cmd = ['/usr/bin/env'] + env_args + cmd cmd += list(filter_options(args, sys.argv[1:], options, exclude, require)) cmd += ['--color', 'yes' if args.color else 'no'] if args.requirements: cmd += ['--requirements'] if isinstance(args, ShellConfig): cmd = create_shell_command(cmd) elif isinstance(args, SanityConfig): if args.base_branch: cmd += ['--base-branch', args.base_branch] return cmd def filter_options(args, argv, options, exclude, require): """ :type args: EnvironmentConfig :type argv: list[str] :type options: dict[str, int] :type exclude: list[str] :type require: list[str] :rtype: collections.Iterable[str] """ options = options.copy() options['--requirements'] = 0 options['--truncate'] = 1 options['--redact'] = 0 if isinstance(args, TestConfig): options.update({ '--changed': 0, '--tracked': 0, '--untracked': 0, '--ignore-committed': 0, '--ignore-staged': 0, '--ignore-unstaged': 0, '--changed-from': 1, '--changed-path': 1, '--metadata': 1, '--exclude': 1, '--require': 1, }) elif isinstance(args, SanityConfig): options.update({ '--base-branch': 1, }) if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)): 
options.update({ '--inventory': 1, }) remaining = 0 for arg in argv: if not arg.startswith('-') and remaining: remaining -= 1 continue remaining = 0 parts = arg.split('=', 1) key = parts[0] if key in options: remaining = options[key] - len(parts) + 1 continue yield arg for arg in args.delegate_args: yield arg for target in exclude: yield '--exclude' yield target for target in require: yield '--require' yield target if isinstance(args, TestConfig): if args.metadata_path: yield '--metadata' yield args.metadata_path yield '--truncate' yield '%d' % args.truncate if args.redact: yield '--redact'
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
test/lib/ansible_test/_internal/sanity/__init__.py
"""Execute Ansible sanity tests.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import abc import glob import json import os import re import collections from .. import types as t from ..util import ( ApplicationError, SubprocessError, display, import_plugins, load_plugins, parse_to_list_of_dict, ABC, ANSIBLE_TEST_DATA_ROOT, is_binary_file, read_lines_without_comments, get_available_python_versions, find_python, is_subdir, paths_to_dirs, get_ansible_version, ) from ..util_common import ( run_command, handle_layout_messages, ) from ..ansible_util import ( ansible_environment, check_pyyaml, ) from ..target import ( walk_internal_targets, walk_sanity_targets, TestTarget, ) from ..executor import ( get_changes_filter, AllTargetsSkipped, Delegate, install_command_requirements, SUPPORTED_PYTHON_VERSIONS, ) from ..config import ( SanityConfig, ) from ..test import ( TestSuccess, TestFailure, TestSkipped, TestMessage, calculate_best_confidence, ) from ..data import ( data_context, ) COMMAND = 'sanity' SANITY_ROOT = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sanity') def command_sanity(args): """ :type args: SanityConfig """ handle_layout_messages(data_context().content.sanity_messages) changes = get_changes_filter(args) require = args.require + changes targets = SanityTargets.create(args.include, args.exclude, require) if not targets.include: raise AllTargetsSkipped() if args.delegate: raise Delegate(require=changes, exclude=args.exclude) install_command_requirements(args) tests = sanity_get_tests() if args.test: tests = [target for target in tests if target.name in args.test] else: disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled] tests = [target for target in tests if target.enabled or args.allow_disabled] if disabled: display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled))) if args.skip_test: tests = [target for target in tests if 
target.name not in args.skip_test] total = 0 failed = [] for test in tests: if args.list_tests: display.info(test.name) continue available_versions = sorted(get_available_python_versions(SUPPORTED_PYTHON_VERSIONS).keys()) if args.python: # specific version selected versions = (args.python,) elif isinstance(test, SanityMultipleVersion): # try all supported versions for multi-version tests when a specific version has not been selected versions = test.supported_python_versions elif not test.supported_python_versions or args.python_version in test.supported_python_versions: # the test works with any version or the version we're already running versions = (args.python_version,) else: # available versions supported by the test versions = tuple(sorted(set(available_versions) & set(test.supported_python_versions))) # use the lowest available version supported by the test or the current version as a fallback (which will be skipped) versions = versions[:1] or (args.python_version,) for version in versions: if isinstance(test, SanityMultipleVersion): skip_version = version else: skip_version = None options = '' if test.supported_python_versions and version not in test.supported_python_versions: display.warning("Skipping sanity test '%s' on unsupported Python %s." % (test.name, version)) result = SanitySkipped(test.name, skip_version) elif not args.python and version not in available_versions: display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." 
% (test.name, version)) result = SanitySkipped(test.name, skip_version) else: check_pyyaml(args, version) if test.supported_python_versions: display.info("Running sanity test '%s' with Python %s" % (test.name, version)) else: display.info("Running sanity test '%s'" % test.name) if isinstance(test, SanityCodeSmellTest): settings = test.load_processor(args) elif isinstance(test, SanityMultipleVersion): settings = test.load_processor(args, version) elif isinstance(test, SanitySingleVersion): settings = test.load_processor(args) elif isinstance(test, SanityVersionNeutral): settings = test.load_processor(args) else: raise Exception('Unsupported test type: %s' % type(test)) all_targets = targets.targets if test.all_targets: usable_targets = targets.targets elif test.no_targets: usable_targets = tuple() else: usable_targets = targets.include all_targets = SanityTargets.filter_and_inject_targets(test, all_targets) usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets) usable_targets = sorted(test.filter_targets(list(usable_targets))) usable_targets = settings.filter_skipped_targets(usable_targets) sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets)) if usable_targets or test.no_targets: if isinstance(test, SanityCodeSmellTest): result = test.test(args, sanity_targets, version) elif isinstance(test, SanityMultipleVersion): result = test.test(args, sanity_targets, version) options = ' --python %s' % version elif isinstance(test, SanitySingleVersion): result = test.test(args, sanity_targets, version) elif isinstance(test, SanityVersionNeutral): result = test.test(args, sanity_targets) else: raise Exception('Unsupported test type: %s' % type(test)) else: result = SanitySkipped(test.name, skip_version) result.write(args) total += 1 if isinstance(result, SanityFailure): failed.append(result.test + options) if failed: message = 'The %d sanity test(s) listed below (out of %d) failed. 
See error output above for details.\n%s' % ( len(failed), total, '\n'.join(failed)) if args.failure_ok: display.error(message) else: raise ApplicationError(message) def collect_code_smell_tests(): # type: () -> t.Tuple[SanityFunc, ...] """Return a tuple of available code smell sanity tests.""" paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py')) if data_context().content.is_ansible: # include Ansible specific code-smell tests which are not configured to be skipped ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell') skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True) paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests) paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p)) tests = tuple(SanityCodeSmellTest(p) for p in paths) return tests def sanity_get_tests(): """ :rtype: tuple[SanityFunc] """ return SANITY_TESTS class SanityIgnoreParser: """Parser for the consolidated sanity test ignore file.""" NO_CODE = '_' def __init__(self, args): # type: (SanityConfig) -> None if data_context().content.collection: ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2]) ansible_label = 'Ansible %s' % ansible_version file_name = 'ignore-%s.txt' % ansible_version else: ansible_label = 'Ansible' file_name = 'ignore.txt' self.args = args self.relative_path = os.path.join(data_context().content.sanity_path, file_name) self.path = os.path.join(data_context().content.root, self.relative_path) self.ignores = collections.defaultdict(lambda: collections.defaultdict(dict)) # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]] self.skips = collections.defaultdict(lambda: collections.defaultdict(int)) # type: t.Dict[str, t.Dict[str, int]] self.parse_errors = [] # type: t.List[t.Tuple[int, int, str]] self.file_not_found_errors = [] # 
type: t.List[t.Tuple[int, str]] lines = read_lines_without_comments(self.path, optional=True) targets = SanityTargets.get_targets() paths = set(target.path for target in targets) tests_by_name = {} # type: t.Dict[str, SanityTest] versioned_test_names = set() # type: t.Set[str] unversioned_test_names = {} # type: t.Dict[str, str] directories = paths_to_dirs(list(paths)) paths_by_test = {} # type: t.Dict[str, t.Set[str]] display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1) for test in sanity_get_tests(): test_targets = SanityTargets.filter_and_inject_targets(test, targets) paths_by_test[test.name] = set(target.path for target in test.filter_targets(test_targets)) if isinstance(test, SanityMultipleVersion): versioned_test_names.add(test.name) tests_by_name.update(dict(('%s-%s' % (test.name, python_version), test) for python_version in test.supported_python_versions)) else: unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS)) tests_by_name[test.name] = test for line_no, line in enumerate(lines, start=1): if not line: self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment")) continue parts = line.split(' ') path = parts[0] codes = parts[1:] if not path: self.parse_errors.append((line_no, 1, "Line cannot start with a space")) continue if path.endswith(os.path.sep): if path not in directories: self.file_not_found_errors.append((line_no, path)) continue else: if path not in paths: self.file_not_found_errors.append((line_no, path)) continue if not codes: self.parse_errors.append((line_no, len(path), "Error code required after path")) continue code = codes[0] if not code: self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty")) continue if len(codes) > 1: self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces")) 
continue parts = code.split('!') code = parts[0] commands = parts[1:] parts = code.split(':') test_name = parts[0] error_codes = parts[1:] test = tests_by_name.get(test_name) if not test: unversioned_name = unversioned_test_names.get(test_name) if unversioned_name: self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % ( unversioned_name, test_name))) elif test_name in versioned_test_names: self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % ( test_name, test_name, args.python_version))) else: self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name)) continue if path.endswith(os.path.sep) and not test.include_directories: self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name)) continue if path not in paths_by_test[test.name] and not test.no_targets: self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path))) continue if commands and error_codes: self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters")) continue if commands: command = commands[0] if len(commands) > 1: self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' 
characters")) continue if command == 'skip': if not test.can_skip: self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name)) continue existing_line_no = self.skips.get(test_name, {}).get(path) if existing_line_no: self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no))) continue self.skips[test_name][path] = line_no continue self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command)) continue if not test.can_ignore: self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name)) continue if test.error_code: if not error_codes: self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name)) continue error_code = error_codes[0] if len(error_codes) > 1: self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters")) continue else: if error_codes: self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name)) continue error_code = self.NO_CODE existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code) if existing: if test.error_code: self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % ( test_name, error_code, path, existing))) else: self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % ( test_name, path, existing))) continue self.ignores[test_name][path][error_code] = line_no @staticmethod def load(args): # type: (SanityConfig) -> SanityIgnoreParser """Return the current SanityIgnore instance, initializing it if needed.""" try: return SanityIgnoreParser.instance except AttributeError: pass SanityIgnoreParser.instance = 
SanityIgnoreParser(args) return SanityIgnoreParser.instance class SanityIgnoreProcessor: """Processor for sanity test ignores for a single run of one sanity test.""" def __init__(self, args, # type: SanityConfig test, # type: SanityTest python_version, # type: t.Optional[str] ): # type: (...) -> None name = test.name code = test.error_code if python_version: full_name = '%s-%s' % (name, python_version) else: full_name = name self.args = args self.test = test self.code = code self.parser = SanityIgnoreParser.load(args) self.ignore_entries = self.parser.ignores.get(full_name, {}) self.skip_entries = self.parser.skips.get(full_name, {}) self.used_line_numbers = set() # type: t.Set[int] def filter_skipped_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] """Return the given targets, with any skipped paths filtered out.""" return sorted(target for target in targets if target.path not in self.skip_entries) def process_errors(self, errors, paths): # type: (t.List[SanityMessage], t.List[str]) -> t.List[SanityMessage] """Return the given errors filtered for ignores and with any settings related errors included.""" errors = self.filter_messages(errors) errors.extend(self.get_errors(paths)) errors = sorted(set(errors)) return errors def filter_messages(self, messages): # type: (t.List[SanityMessage]) -> t.List[SanityMessage] """Return a filtered list of the given messages using the entries that have been loaded.""" filtered = [] for message in messages: path_entry = self.ignore_entries.get(message.path) if path_entry: code = message.code if self.code else SanityIgnoreParser.NO_CODE line_no = path_entry.get(code) if line_no: self.used_line_numbers.add(line_no) continue filtered.append(message) return filtered def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage] """Return error messages related to issues with the file.""" messages = [] # unused errors unused = [] # type: t.List[t.Tuple[int, str, str]] if self.test.no_targets or 
self.test.all_targets: # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked targets = SanityTargets.get_targets() test_targets = SanityTargets.filter_and_inject_targets(self.test, targets) paths = [target.path for target in test_targets] for path in paths: path_entry = self.ignore_entries.get(path) if not path_entry: continue unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers) messages.extend(SanityMessage( code=self.code, message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path, path=self.parser.relative_path, line=line, column=1, confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None, ) for line, path, code in unused) return messages class SanitySuccess(TestSuccess): """Sanity test success.""" def __init__(self, test, python_version=None): """ :type test: str :type python_version: str """ super(SanitySuccess, self).__init__(COMMAND, test, python_version) class SanitySkipped(TestSkipped): """Sanity test skipped.""" def __init__(self, test, python_version=None): """ :type test: str :type python_version: str """ super(SanitySkipped, self).__init__(COMMAND, test, python_version) class SanityFailure(TestFailure): """Sanity test failure.""" def __init__(self, test, python_version=None, messages=None, summary=None): """ :type test: str :type python_version: str :type messages: list[SanityMessage] :type summary: unicode """ super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary) class SanityMessage(TestMessage): """Single sanity test message for one file.""" class SanityTargets: """Sanity test target information.""" def __init__(self, targets, include): # type: (t.Tuple[TestTarget], t.Tuple[TestTarget]) -> None self.targets = targets self.include = include 
@staticmethod def create(include, exclude, require): # type: (t.List[str], t.List[str], t.List[str]) -> SanityTargets """Create a SanityTargets instance from the given include, exclude and require lists.""" _targets = SanityTargets.get_targets() _include = walk_internal_targets(_targets, include, exclude, require) return SanityTargets(_targets, _include) @staticmethod def filter_and_inject_targets(test, targets): # type: (SanityTest, t.Iterable[TestTarget]) -> t.List[TestTarget] """Filter and inject targets based on test requirements and the given target list.""" test_targets = list(targets) if not test.include_symlinks: # remove all symlinks unless supported by the test test_targets = [target for target in test_targets if not target.symlink] if not test.include_directories or not test.include_symlinks: # exclude symlinked directories unless supported by the test test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)] if test.include_directories: # include directories containing any of the included files test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets])) if not test.include_symlinks: # remove all directory symlinks unless supported by the test test_targets = [target for target in test_targets if not target.symlink] return test_targets @staticmethod def get_targets(): # type: () -> t.Tuple[TestTarget, ...] """Return a tuple of sanity test targets. 
Uses a cached version when available.""" try: return SanityTargets.get_targets.targets except AttributeError: SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets())) return SanityTargets.get_targets.targets class SanityTest(ABC): """Sanity test base class.""" __metaclass__ = abc.ABCMeta ansible_only = False def __init__(self, name): self.name = name self.enabled = True @property def error_code(self): # type: () -> t.Optional[str] """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes.""" return None @property def can_ignore(self): # type: () -> bool """True if the test supports ignore entries.""" return True @property def can_skip(self): # type: () -> bool """True if the test supports skip entries.""" return not self.all_targets and not self.no_targets @property def all_targets(self): # type: () -> bool """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets.""" return False @property def no_targets(self): # type: () -> bool """True if the test does not use test targets. 
Mutually exclusive with all_targets.""" return False @property def include_directories(self): # type: () -> bool """True if the test targets should include directories.""" return False @property def include_symlinks(self): # type: () -> bool """True if the test targets should include symlinks.""" return False @property def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return tuple(python_version for python_version in SUPPORTED_PYTHON_VERSIONS if python_version.startswith('3.')) def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the test.""" if self.no_targets: return [] raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name) class SanityCodeSmellTest(SanityTest): """Sanity test script.""" def __init__(self, path): name = os.path.splitext(os.path.basename(path))[0] config_path = os.path.splitext(path)[0] + '.json' super(SanityCodeSmellTest, self).__init__(name) self.path = path self.config_path = config_path if os.path.exists(config_path) else None self.config = None if self.config_path: with open(self.config_path, 'r') as config_fd: self.config = json.load(config_fd) if self.config: self.enabled = not self.config.get('disabled') self.output = self.config.get('output') # type: t.Optional[str] self.extensions = self.config.get('extensions') # type: t.List[str] self.prefixes = self.config.get('prefixes') # type: t.List[str] self.files = self.config.get('files') # type: t.List[str] self.text = self.config.get('text') # type: t.Optional[bool] self.ignore_self = self.config.get('ignore_self') # type: bool self.__all_targets = self.config.get('all_targets') # type: bool self.__no_targets = self.config.get('no_targets') # type: bool 
self.__include_directories = self.config.get('include_directories') # type: bool self.__include_symlinks = self.config.get('include_symlinks') # type: bool else: self.output = None self.extensions = [] self.prefixes = [] self.files = [] self.text = None # type: t.Optional[bool] self.ignore_self = False self.__all_targets = False self.__no_targets = True self.__include_directories = False self.__include_symlinks = False if self.no_targets: mutually_exclusive = ( 'extensions', 'prefixes', 'files', 'text', 'ignore_self', 'all_targets', 'include_directories', 'include_symlinks', ) problems = sorted(name for name in mutually_exclusive if getattr(self, name)) if problems: raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems))) @property def all_targets(self): # type: () -> bool """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets.""" return self.__all_targets @property def no_targets(self): # type: () -> bool """True if the test does not use test targets. 
Mutually exclusive with all_targets.""" return self.__no_targets @property def include_directories(self): # type: () -> bool """True if the test targets should include directories.""" return self.__include_directories @property def include_symlinks(self): # type: () -> bool """True if the test targets should include symlinks.""" return self.__include_symlinks def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the test.""" if self.no_targets: return [] if self.text is not None: if self.text: targets = [target for target in targets if not is_binary_file(target.path)] else: targets = [target for target in targets if is_binary_file(target.path)] if self.extensions: targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions or (is_subdir(target.path, 'bin') and '.py' in self.extensions)] if self.prefixes: targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)] if self.files: targets = [target for target in targets if os.path.basename(target.path) in self.files] if self.ignore_self and data_context().content.is_ansible: relative_self_path = os.path.relpath(self.path, data_context().content.root) targets = [target for target in targets if target.path != relative_self_path] return targets def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ cmd = [find_python(python_version), self.path] env = ansible_environment(args, color=False) pattern = None data = None settings = self.load_processor(args) paths = [target.path for target in targets.include] if self.config: if self.output == 'path-line-column-message': pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$' elif self.output == 'path-message': pattern = '^(?P<path>[^:]*): (?P<message>.*)$' else: 
pattern = ApplicationError('Unsupported output type: %s' % self.output) if not self.no_targets: data = '\n'.join(paths) if data: display.info(data, verbosity=4) try: stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True) status = 0 except SubprocessError as ex: stdout = ex.stdout stderr = ex.stderr status = ex.status if args.explain: return SanitySuccess(self.name) if stdout and not stderr: if pattern: matches = parse_to_list_of_dict(pattern, stdout) messages = [SanityMessage( message=m['message'], path=m['path'], line=int(m.get('line', 0)), column=int(m.get('column', 0)), ) for m in matches] messages = settings.process_errors(messages, paths) if not messages: return SanitySuccess(self.name) return SanityFailure(self.name, messages=messages) if stderr or status: summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) return SanityFailure(self.name, summary=summary) messages = settings.process_errors([], paths) if messages: return SanityFailure(self.name, messages=messages) return SanitySuccess(self.name) def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return SanityIgnoreProcessor(args, self, None) class SanityFunc(SanityTest): """Base class for sanity test plugins.""" def __init__(self): name = self.__class__.__name__ name = re.sub(r'Test$', '', name) # drop Test suffix name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization super(SanityFunc, self).__init__(name) class SanityVersionNeutral(SanityFunc): """Base class for sanity test plugins which are idependent of the python version being used.""" @abc.abstractmethod def test(self, args, targets): """ :type args: SanityConfig :type targets: SanityTargets :rtype: TestResult """ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return 
SanityIgnoreProcessor(args, self, None) @property def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return None class SanitySingleVersion(SanityFunc): """Base class for sanity test plugins which should run on a single python version.""" @abc.abstractmethod def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return SanityIgnoreProcessor(args, self, None) class SanityMultipleVersion(SanityFunc): """Base class for sanity test plugins which should run on multiple python versions.""" @abc.abstractmethod def test(self, args, targets, python_version): """ :type args: SanityConfig :type targets: SanityTargets :type python_version: str :rtype: TestResult """ def load_processor(self, args, python_version): # type: (SanityConfig, str) -> SanityIgnoreProcessor """Load the ignore processor for this sanity test.""" return SanityIgnoreProcessor(args, self, python_version) @property def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]] """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return SUPPORTED_PYTHON_VERSIONS SANITY_TESTS = ( ) def sanity_init(): """Initialize full sanity test list (includes code-smell scripts determined at runtime).""" import_plugins('sanity') sanity_plugins = {} # type: t.Dict[str, t.Type[SanityFunc]] load_plugins(SanityFunc, sanity_plugins) sanity_tests = tuple([plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only]) global SANITY_TESTS # pylint: disable=locally-disabled, global-statement SANITY_TESTS = tuple(sorted(sanity_tests + 
collect_code_smell_tests(), key=lambda k: k.name))
closed
ansible/ansible
https://github.com/ansible/ansible
61,946
ansible-test --venv fails to find its library
##### SUMMARY When installed from the ansible-2.9 PPA https://launchpad.net/~ansible/+archive/ubuntu/ansible-2.9 or from the nightly ansible rpms for RHEL7, ansible-test --venv fails to find its internal libraries ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below 2.9.0beta1 devel ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below N/A ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> Ubuntu18 or CentOS7. I suspect that this is irrespective of the OS but may be related to how we are presently packaging. Ubuntu18 PPA and CentOS7 are both using python-2.7 with the separate python-virtualenv package at this time. 
##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> I did these steps in a fresh centos7 VM: Create a yum repo file for the nightlies: ``` $ cat ansible-nightly.repo (08:59:07) [ansible-nightly] name=Nightly Ansible packaging - $basearch baseurl=https://releases.ansible.com/ansible/rpm/nightly/devel/epel-7-$basearch/ failovermethod=priority enabled=0 #gpgcheck=1 #gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 ``` use the ansible-nightly repo to install ansible-test ``` sudo yum install ansible-test --enablerepo=ansible-nightly --nogpgcheck # ansible-test package and ansible package will be installed $ ansible --version ansible 2.10.0.dev0 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/badger/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] $ git clone git://github.com/ansible/ansible $ cd ansible $ ansible-test sanity --venv --test pep8 lib/ansible/module_utils/basic.py Traceback (most recent call last): File "/usr/bin/ansible-test", line 28, in <module> main() File "/usr/bin/ansible-test", line 22, in main from ansible_test._internal.cli import main as cli_main ImportError: No module named ansible_test._internal.cli ERROR: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/badger/ansible LC_ALL=en_US.UTF-8 /var/tmp/tmpYdP0oT/python2.7 /usr/bin/ansible-test sanity --test pep8 lib/ansible/module_utils/basic.py --metadata metadata-X1O5jT.json --truncate 117 --color yes --requirements" returned exit status 1. 
``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS ansible-test works with the --venv switch ##### ACTUAL RESULTS I believe this is the venv directory: ```~/ansible/test/results/.tmp/delegation/python2.7``` Contents of site-packages there: ``` [pts/0@centos7-master ~/ansible/test/results/.tmp/delegation/python2.7]$ ls -l lib/python2.7/site-packages total 20 -rw-rw-r--. 1 badger badger 126 Sep 5 14:39 easy_install.py -rw-rw-r--. 1 badger badger 315 Sep 5 14:39 easy_install.pyc drwxrwxr-x. 10 badger badger 4096 Sep 5 14:39 pip drwxrwxr-x. 2 badger badger 155 Sep 5 14:39 pip-9.0.1.dist-info drwxrwxr-x. 4 badger badger 74 Sep 5 14:39 pkg_resources drwxrwxr-x. 4 badger badger 4096 Sep 5 14:39 setuptools drwxrwxr-x. 2 badger badger 199 Sep 5 14:39 setuptools-28.8.0.dist-info drwxrwxr-x. 5 badger badger 4096 Sep 5 14:39 wheel drwxrwxr-x. 2 badger badger 174 Sep 5 14:39 wheel-0.29.0.dist-info ``` Possible cause: ansible and/or ansible_test needs to be installed into the venv before ansible-test is re-invoked?
https://github.com/ansible/ansible/issues/61946
https://github.com/ansible/ansible/pull/62033
e3ea89801bae73921b298e7b4628860a272e942c
c77ab110514900ee439c7281a1d1dd14504cf44a
2019-09-06T16:53:27Z
python
2019-09-10T01:32:29Z
test/lib/ansible_test/_internal/util_common.py
"""Common utility code that depends on CommonConfig.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import atexit import contextlib import json import os import shutil import tempfile import textwrap from . import types as t from .util import ( common_environment, COVERAGE_CONFIG_NAME, display, find_python, is_shippable, MODE_DIRECTORY, MODE_FILE_EXECUTE, PYTHON_PATHS, raw_command, to_bytes, ANSIBLE_TEST_DATA_ROOT, make_dirs, ApplicationError, ) from .data import ( data_context, ) from .provider.layout import ( LayoutMessages, ) class ResultType: """Test result type.""" BOT = None # type: ResultType COVERAGE = None # type: ResultType DATA = None # type: ResultType JUNIT = None # type: ResultType LOGS = None # type: ResultType REPORTS = None # type: ResultType TMP = None # type: ResultType @staticmethod def _populate(): ResultType.BOT = ResultType('bot') ResultType.COVERAGE = ResultType('coverage') ResultType.DATA = ResultType('data') ResultType.JUNIT = ResultType('junit') ResultType.LOGS = ResultType('logs') ResultType.REPORTS = ResultType('reports') ResultType.TMP = ResultType('.tmp') def __init__(self, name): # type: (str) -> None self.name = name @property def relative_path(self): # type: () -> str """The content relative path to the results.""" return os.path.join(data_context().content.results_path, self.name) @property def path(self): # type: () -> str """The absolute path to the results.""" return os.path.join(data_context().content.root, self.relative_path) def __str__(self): # type: () -> str return self.name # noinspection PyProtectedMember ResultType._populate() # pylint: disable=protected-access class CommonConfig: """Configuration common to all commands.""" def __init__(self, args, command): """ :type args: any :type command: str """ self.command = command self.color = args.color # type: bool self.explain = args.explain # type: bool self.verbosity = args.verbosity # type: int self.debug = args.debug # type: 
bool self.truncate = args.truncate # type: int self.redact = args.redact # type: bool if is_shippable(): self.redact = True self.cache = {} def get_ansible_config(self): # type: () -> str """Return the path to the Ansible config for the given config.""" return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg') def handle_layout_messages(messages): # type: (t.Optional[LayoutMessages]) -> None """Display the given layout messages.""" if not messages: return for message in messages.info: display.info(message, verbosity=1) for message in messages.warning: display.warning(message) if messages.error: raise ApplicationError('\n'.join(messages.error)) @contextlib.contextmanager def named_temporary_file(args, prefix, suffix, directory, content): """ :param args: CommonConfig :param prefix: str :param suffix: str :param directory: str :param content: str | bytes | unicode :rtype: str """ if args.explain: yield os.path.join(directory, '%stemp%s' % (prefix, suffix)) else: with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd: tempfile_fd.write(to_bytes(content)) tempfile_fd.flush() yield tempfile_fd.name def write_json_test_results(category, name, content): # type: (ResultType, str, t.Union[t.List[t.Any], t.Dict[str, t.Any]]) -> None """Write the given json content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) write_json_file(path, content, create_directories=True) def write_text_test_results(category, name, content): # type: (ResultType, str, str) -> None """Write the given text content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) write_text_file(path, content, create_directories=True) def write_json_file(path, content, create_directories=False): # type: (str, t.Union[t.List[t.Any], t.Dict[str, t.Any]], bool) -> None """Write the given json content to the specified path, optionally creating missing 
directories.""" text_content = json.dumps(content, sort_keys=True, indent=4) + '\n' write_text_file(path, text_content, create_directories=create_directories) def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None """Write the given text content to the specified path, optionally creating missing directories.""" if create_directories: make_dirs(os.path.dirname(path)) with open(to_bytes(path), 'wb') as file: file.write(to_bytes(content)) def get_python_path(args, interpreter): """ :type args: TestConfig :type interpreter: str :rtype: str """ # When the python interpreter is already named "python" its directory can simply be added to the path. # Using another level of indirection is only required when the interpreter has a different name. if os.path.basename(interpreter) == 'python': return os.path.dirname(interpreter) python_path = PYTHON_PATHS.get(interpreter) if python_path: return python_path prefix = 'python-' suffix = '-ansible' root_temp_dir = '/tmp' if args.explain: return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix))) python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir) injected_interpreter = os.path.join(python_path, 'python') # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments. # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter. use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter) if use_symlink: display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1) os.symlink(interpreter, injected_interpreter) else: display.info('Injecting "%s" as a execv wrapper for the "%s" interpreter.' 
% (injected_interpreter, interpreter), verbosity=1) code = textwrap.dedent(''' #!%s from __future__ import absolute_import from os import execv from sys import argv python = '%s' execv(python, [python] + argv[1:]) ''' % (interpreter, interpreter)).lstrip() write_text_file(injected_interpreter, code) os.chmod(injected_interpreter, MODE_FILE_EXECUTE) os.chmod(python_path, MODE_DIRECTORY) if not PYTHON_PATHS: atexit.register(cleanup_python_paths) PYTHON_PATHS[interpreter] = python_path return python_path def cleanup_python_paths(): """Clean up all temporary python directories.""" for path in sorted(PYTHON_PATHS.values()): display.info('Cleaning up temporary python directory: %s' % path, verbosity=2) shutil.rmtree(path) def get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=None): """ :type args: TestConfig :type target_name: str :type version: str :type temp_path: str :type module_coverage: bool :type remote_temp_path: str | None :rtype: dict[str, str] """ if temp_path: # integration tests (both localhost and the optional testhost) # config and results are in a temporary directory coverage_config_base_path = temp_path coverage_output_base_path = temp_path elif args.coverage_config_base_path: # unit tests, sanity tests and other special cases (localhost only) # config is in a temporary directory # results are in the source tree coverage_config_base_path = args.coverage_config_base_path coverage_output_base_path = os.path.join(data_context().content.root, data_context().content.results_path) else: raise Exception('No temp path and no coverage config base path. 
Check for missing coverage_context usage.') config_file = os.path.join(coverage_config_base_path, COVERAGE_CONFIG_NAME) coverage_file = os.path.join(coverage_output_base_path, ResultType.COVERAGE.name, '%s=%s=%s=%s=coverage' % ( args.command, target_name, args.coverage_label or 'local-%s' % version, 'python-%s' % version)) if not args.explain and not os.path.exists(config_file): raise Exception('Missing coverage config file: %s' % config_file) if args.coverage_check: # cause the 'coverage' module to be found, but not imported or enabled coverage_file = '' # Enable code coverage collection on local Python programs (this does not include Ansible modules). # Used by the injectors to support code coverage. # Used by the pytest unit test plugin to support code coverage. # The COVERAGE_FILE variable is also used directly by the 'coverage' module. env = dict( COVERAGE_CONF=config_file, COVERAGE_FILE=coverage_file, ) if module_coverage: # Enable code coverage collection on Ansible modules (both local and remote). # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage. env.update(dict( _ANSIBLE_COVERAGE_CONFIG=config_file, _ANSIBLE_COVERAGE_OUTPUT=coverage_file, )) if remote_temp_path: # Include the command, target and label so the remote host can create a filename with that info. 
The remote # is responsible for adding '={language version}=coverage.{hostname}.{pid}.{id}' env['_ANSIBLE_COVERAGE_REMOTE_OUTPUT'] = os.path.join(remote_temp_path, '%s=%s=%s' % ( args.command, target_name, args.coverage_label or 'remote')) env['_ANSIBLE_COVERAGE_REMOTE_WHITELIST'] = os.path.join(data_context().content.root, '*') return env def intercept_command(args, cmd, target_name, env, capture=False, data=None, cwd=None, python_version=None, temp_path=None, module_coverage=True, virtualenv=None, disable_coverage=False, remote_temp_path=None): """ :type args: TestConfig :type cmd: collections.Iterable[str] :type target_name: str :type env: dict[str, str] :type capture: bool :type data: str | None :type cwd: str | None :type python_version: str | None :type temp_path: str | None :type module_coverage: bool :type virtualenv: str | None :type disable_coverage: bool :type remote_temp_path: str | None :rtype: str | None, str | None """ if not env: env = common_environment() cmd = list(cmd) version = python_version or args.python_version interpreter = virtualenv or find_python(version) inject_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector') if not virtualenv: # injection of python into the path is required when not activating a virtualenv # otherwise scripts may find the wrong interpreter or possibly no interpreter python_path = get_python_path(args, interpreter) inject_path = python_path + os.path.pathsep + inject_path env['PATH'] = inject_path + os.path.pathsep + env['PATH'] env['ANSIBLE_TEST_PYTHON_VERSION'] = version env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = interpreter if args.coverage and not disable_coverage: # add the necessary environment variables to enable code coverage collection env.update(get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=remote_temp_path)) return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd) def run_command(args, cmd, capture=False, env=None, data=None, 
cwd=None, always=False, stdin=None, stdout=None, cmd_verbosity=1, str_errors='strict'): """ :type args: CommonConfig :type cmd: collections.Iterable[str] :type capture: bool :type env: dict[str, str] | None :type data: str | None :type cwd: str | None :type always: bool :type stdin: file | None :type stdout: file | None :type cmd_verbosity: int :type str_errors: str :rtype: str | None, str | None """ explain = args.explain and not always return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, cmd_verbosity=cmd_verbosity, str_errors=str_errors)
closed
ansible/ansible
https://github.com/ansible/ansible
61,411
openssh_keygen does not set permissions correctly on public key
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY In the openssh_keypair module, `set_fs_attributes_if_different` is not called on the public key, so the owner and group do not get set on the .pub file containing the public key. I haven't tested it or reasoned about the code, but this may only occur when trying to run the task when the public key already exists. <https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/crypto/openssh_keypair.py#L231> In this line, file_args only receives the private key file location. Additionally, there are "isPublicKeyValid" and "isPrivateKeyValid", and in the private key one we can see here <https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/crypto/openssh_keypair.py#L254> that we check the permissions for the private key, but there is no corresponding check in the public key. 
##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> openssh_keypair ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.8.4 config file = <path_inside_home_directory>/ansible.cfg configured module search path = ['<home_directory>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.7/site-packages/ansible executable location = /usr/bin/ansible python version = 3.7.4 (default, Jul 16 2019, 07:12:58) [GCC 9.1.0] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below DEFAULT_HOST_LIST(<path_inside_home_directory>/aws-remote-work/ansible.cfg) = ['<path_inside_home_directory>/aws-remote-work/hosts'] ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> host: Linux host-machine 5.2.9-arch1-1-ARCH #1 SMP PREEMPT Fri Aug 16 11:29:43 UTC 2019 x86_64 GNU/Linux remote: Linux remote-machine 4.9.0-9-amd64 #1 SMP Debian 4.9.168-1+deb9u5 (2019-08-11) x86_64 GNU/Linux ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> I am running the playbook with --become, so that it's executing as root <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: create ssh keypair in user home directory openssh_keypair: path: "/home/{{user_name}}/.ssh/my_ssh_key" type: ecdsa owner: "{{user_name}}" group: "users" mode: "u=rw,g=,o=" ``` ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> my_ssh_key should have permissions like `-rw------- 1 user_name users` my_ssh_key.pub should have the same permissions: `-rw------- 1 user_name users` ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> only the private key has the correct permissions, and the public key has permissions `-rw-r--r-- 1 root root`
https://github.com/ansible/ansible/issues/61411
https://github.com/ansible/ansible/pull/61658
c77ab110514900ee439c7281a1d1dd14504cf44a
c19cea9b03f343a6c7da6607ce355ffa409e985a
2019-08-27T20:27:05Z
python
2019-09-10T04:44:04Z
changelogs/fragments/61658-openssh_keypair-public-key-permissions.yml
closed
ansible/ansible
https://github.com/ansible/ansible
61,411
openssh_keygen does not set permissions correctly on public key
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY In the openssh_keypair module, `set_fs_attributes_if_different` is not called on the public key, so the owner and group do not get set on the .pub file containing the public key. I haven't tested it or reasoned about the code, but this may only occur when trying to run the task when the public key already exists. <https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/crypto/openssh_keypair.py#L231> In this line, file_args only receives the private key file location. Additionally, there are "isPublicKeyValid" and "isPrivateKeyValid", and in the private key one we can see here <https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/crypto/openssh_keypair.py#L254> that we check the permissions for the private key, but there is no corresponding check in the public key. 
##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> openssh_keypair ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below ansible 2.8.4 config file = <path_inside_home_directory>/ansible.cfg configured module search path = ['<home_directory>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.7/site-packages/ansible executable location = /usr/bin/ansible python version = 3.7.4 (default, Jul 16 2019, 07:12:58) [GCC 9.1.0] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below DEFAULT_HOST_LIST(<path_inside_home_directory>/aws-remote-work/ansible.cfg) = ['<path_inside_home_directory>/aws-remote-work/hosts'] ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> host: Linux host-machine 5.2.9-arch1-1-ARCH #1 SMP PREEMPT Fri Aug 16 11:29:43 UTC 2019 x86_64 GNU/Linux remote: Linux remote-machine 4.9.0-9-amd64 #1 SMP Debian 4.9.168-1+deb9u5 (2019-08-11) x86_64 GNU/Linux ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> I am running the playbook with --become, so that it's executing as root <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: create ssh keypair in user home directory openssh_keypair: path: "/home/{{user_name}}/.ssh/my_ssh_key" type: ecdsa owner: "{{user_name}}" group: "users" mode: "u=rw,g=,o=" ``` ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> my_ssh_key should have permissions like `-rw------- 1 user_name users` my_ssh_key.pub should have the same permissions: `-rw------- 1 user_name users` ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> only the private key has the correct permissions, and the public key has permissions `-rw-r--r-- 1 root root`
https://github.com/ansible/ansible/issues/61411
https://github.com/ansible/ansible/pull/61658
c77ab110514900ee439c7281a1d1dd14504cf44a
c19cea9b03f343a6c7da6607ce355ffa409e985a
2019-08-27T20:27:05Z
python
2019-09-10T04:44:04Z
lib/ansible/modules/crypto/openssh_keypair.py
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018, David Kainz <[email protected]> <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: openssh_keypair author: "David Kainz (@lolcube)" version_added: "2.8" short_description: Generate OpenSSH private and public keys. description: - "This module allows one to (re)generate OpenSSH private and public keys. It uses ssh-keygen to generate keys. One can generate C(rsa), C(dsa), C(rsa1), C(ed25519) or C(ecdsa) private keys." requirements: - "ssh-keygen" options: state: description: - Whether the private and public keys should exist or not, taking action if the state is different from what is stated. type: str default: present choices: [ present, absent ] size: description: - "Specifies the number of bits in the private key to create. For RSA keys, the minimum size is 1024 bits and the default is 4096 bits. Generally, 2048 bits is considered sufficient. DSA keys must be exactly 1024 bits as specified by FIPS 186-2. For ECDSA keys, size determines the key length by selecting from one of three elliptic curve sizes: 256, 384 or 521 bits. Attempting to use bit lengths other than these three values for ECDSA keys will cause this module to fail. Ed25519 keys have a fixed length and the size will be ignored." type: int type: description: - "The algorithm used to generate the SSH private key. C(rsa1) is for protocol version 1. C(rsa1) is deprecated and may not be supported by every version of ssh-keygen." 
type: str default: rsa choices: ['rsa', 'dsa', 'rsa1', 'ecdsa', 'ed25519'] force: description: - Should the key be regenerated even if it already exists type: bool default: false path: description: - Name of the files containing the public and private key. The file containing the public key will have the extension C(.pub). type: path required: true comment: description: - Provides a new comment to the public key. When checking if the key is in the correct state this will be ignored. type: str version_added: "2.9" extends_documentation_fragment: files ''' EXAMPLES = ''' # Generate an OpenSSH keypair with the default values (4096 bits, rsa) - openssh_keypair: path: /tmp/id_ssh_rsa # Generate an OpenSSH rsa keypair with a different size (2048 bits) - openssh_keypair: path: /tmp/id_ssh_rsa size: 2048 # Force regenerate an OpenSSH keypair if it already exists - openssh_keypair: path: /tmp/id_ssh_rsa force: True # Generate an OpenSSH keypair with a different algorithm (dsa) - openssh_keypair: path: /tmp/id_ssh_dsa type: dsa ''' RETURN = ''' size: description: Size (in bits) of the SSH private key returned: changed or success type: int sample: 4096 type: description: Algorithm used to generate the SSH private key returned: changed or success type: str sample: rsa filename: description: Path to the generated SSH private key file returned: changed or success type: str sample: /tmp/id_ssh_rsa fingerprint: description: The fingerprint of the key. 
returned: changed or success type: str sample: SHA256:r4YCZxihVjedH2OlfjVGI6Y5xAYtdCwk8VxKyzVyYfM public_key: description: The public key of the generated SSH private key returned: changed or success type: str sample: ssh-rsa AAAAB3Nza(...omitted...)veL4E3Xcw== test_key comment: description: The comment of the generated key returned: changed or success type: str sample: test@comment ''' import os import stat import errno from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native class KeypairError(Exception): pass class Keypair(object): def __init__(self, module): self.path = module.params['path'] self.state = module.params['state'] self.force = module.params['force'] self.size = module.params['size'] self.type = module.params['type'] self.comment = module.params['comment'] self.changed = False self.check_mode = module.check_mode self.privatekey = None self.fingerprint = {} self.public_key = {} if self.type in ('rsa', 'rsa1'): self.size = 4096 if self.size is None else self.size if self.size < 1024: module.fail_json(msg=('For RSA keys, the minimum size is 1024 bits and the default is 4096 bits. ' 'Attempting to use bit lengths under 1024 will cause the module to fail.')) if self.type == 'dsa': self.size = 1024 if self.size is None else self.size if self.size != 1024: module.fail_json(msg=('DSA keys must be exactly 1024 bits as specified by FIPS 186-2.')) if self.type == 'ecdsa': self.size = 256 if self.size is None else self.size if self.size not in (256, 384, 521): module.fail_json(msg=('For ECDSA keys, size determines the key length by selecting from ' 'one of three elliptic curve sizes: 256, 384 or 521 bits. ' 'Attempting to use bit lengths other than these three values for ' 'ECDSA keys will cause this module to fail. 
')) if self.type == 'ed25519': self.size = 256 def generate(self, module): # generate a keypair if not self.isPrivateKeyValid(module, perms_required=False) or self.force: args = [ module.get_bin_path('ssh-keygen', True), '-q', '-N', '', '-b', str(self.size), '-t', self.type, '-f', self.path, ] if self.comment: args.extend(['-C', self.comment]) else: args.extend(['-C', ""]) try: if os.path.exists(self.path) and not os.access(self.path, os.W_OK): os.chmod(self.path, stat.S_IWUSR + stat.S_IRUSR) self.changed = True stdin_data = None if os.path.exists(self.path): stdin_data = 'y' module.run_command(args, data=stdin_data) proc = module.run_command([module.get_bin_path('ssh-keygen', True), '-lf', self.path]) self.fingerprint = proc[1].split() pubkey = module.run_command([module.get_bin_path('ssh-keygen', True), '-yf', self.path]) self.public_key = pubkey[1].strip('\n') except Exception as e: self.remove() module.fail_json(msg="%s" % to_native(e)) elif not self.isPublicKeyValid(module): pubkey = module.run_command([module.get_bin_path('ssh-keygen', True), '-yf', self.path]) pubkey = pubkey[1].strip('\n') try: self.changed = True with open(self.path + ".pub", "w") as pubkey_f: pubkey_f.write(pubkey + '\n') os.chmod(self.path + ".pub", stat.S_IWUSR + stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH) except IOError: module.fail_json( msg='The public key is missing or does not match the private key. 
' 'Unable to regenerate the public key.') self.public_key = pubkey if self.comment: try: if os.path.exists(self.path) and not os.access(self.path, os.W_OK): os.chmod(self.path, stat.S_IWUSR + stat.S_IRUSR) args = [module.get_bin_path('ssh-keygen', True), '-q', '-o', '-c', '-C', self.comment, '-f', self.path] module.run_command(args) except IOError: module.fail_json( msg='Unable to update the comment for the public key.') file_args = module.load_file_common_arguments(module.params) if module.set_fs_attributes_if_different(file_args, False): self.changed = True def isPrivateKeyValid(self, module, perms_required=True): # check if the key is correct def _check_state(): return os.path.exists(self.path) if _check_state(): proc = module.run_command([module.get_bin_path('ssh-keygen', True), '-lf', self.path], check_rc=False) if not proc[0] == 0: if os.path.isdir(self.path): module.fail_json(msg='%s is a directory. Please specify a path to a file.' % (self.path)) return False fingerprint = proc[1].split() keysize = int(fingerprint[0]) keytype = fingerprint[-1][1:-1].lower() else: return False def _check_perms(module): file_args = module.load_file_common_arguments(module.params) return not module.set_fs_attributes_if_different(file_args, False) def _check_type(): return self.type == keytype def _check_size(): return self.size == keysize self.fingerprint = fingerprint if not perms_required: return _check_state() and _check_type() and _check_size() return _check_state() and _check_perms(module) and _check_type() and _check_size() def isPublicKeyValid(self, module): def _get_pubkey_content(): if os.path.exists(self.path + ".pub"): with open(self.path + ".pub", "r") as pubkey_f: present_pubkey = pubkey_f.read().strip(' \n') return present_pubkey else: return False def _parse_pubkey(): pubkey_content = _get_pubkey_content() if pubkey_content: parts = pubkey_content.split(' ', 2) return parts[0], parts[1], '' if len(parts) <= 2 else parts[2] return False def _pubkey_valid(pubkey): 
if pubkey_parts: current_pubkey = ' '.join([pubkey_parts[0], pubkey_parts[1]]) return current_pubkey == pubkey return False def _comment_valid(): if pubkey_parts: return pubkey_parts[2] == self.comment return False pubkey = module.run_command([module.get_bin_path('ssh-keygen', True), '-yf', self.path]) pubkey = pubkey[1].strip('\n') pubkey_parts = _parse_pubkey() if _pubkey_valid(pubkey): self.public_key = pubkey if not self.comment: return _pubkey_valid(pubkey) return _pubkey_valid(pubkey) and _comment_valid() def dump(self): # return result as a dict """Serialize the object into a dictionary.""" result = { 'changed': self.changed, 'size': self.size, 'type': self.type, 'filename': self.path, # On removal this has no value 'fingerprint': self.fingerprint[1] if self.fingerprint else '', 'public_key': self.public_key, 'comment': self.comment if self.comment else '', } return result def remove(self): """Remove the resource from the filesystem.""" try: os.remove(self.path) self.changed = True except OSError as exc: if exc.errno != errno.ENOENT: raise KeypairError(exc) else: pass if os.path.exists(self.path + ".pub"): try: os.remove(self.path + ".pub") self.changed = True except OSError as exc: if exc.errno != errno.ENOENT: raise KeypairError(exc) else: pass def main(): # Define Ansible Module module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['present', 'absent']), size=dict(type='int'), type=dict(type='str', default='rsa', choices=['rsa', 'dsa', 'rsa1', 'ecdsa', 'ed25519']), force=dict(type='bool', default=False), path=dict(type='path', required=True), comment=dict(type='str'), ), supports_check_mode=True, add_file_common_args=True, ) # Check if Path exists base_dir = os.path.dirname(module.params['path']) or '.' 
if not os.path.isdir(base_dir): module.fail_json( name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir ) keypair = Keypair(module) if keypair.state == 'present': if module.check_mode: result = keypair.dump() result['changed'] = module.params['force'] or not keypair.isPrivateKeyValid(module) or not keypair.isPublicKeyValid(module) module.exit_json(**result) try: keypair.generate(module) except Exception as exc: module.fail_json(msg=to_native(exc)) else: if module.check_mode: keypair.changed = os.path.exists(module.params['path']) if keypair.changed: keypair.fingerprint = {} result = keypair.dump() module.exit_json(**result) try: keypair.remove() except Exception as exc: module.fail_json(msg=to_native(exc)) result = keypair.dump() module.exit_json(**result) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,672
jenkins_plugin: use of authorization crumb must be conducted in a session since LTS 2.176
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Plugins cannot be installed using the jenkins_plugin module since upgrading to LTS 2.176. This is due to additional CSRF security where the crumb that is issued may only be used within the same session. In the jenkins log messages like this are reported: ``` WARNING: Found invalid crumb 5cae269d58b83057dfbd85f6c32f0845. Will check remaining parameters for a valid one... ``` The module will then error with `Cannot install plugin.` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME jenkins_plugin ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.8.4 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/james/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/dist-packages/ansible executable location = /usr/bin/ansible python version = 2.7.16 (default, Apr 6 2019, 01:42:57) [GCC 8.3.0] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below (no output) ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Install jenkins 2.176, attempt to install plugins using ansible, e.g. 
a task such as: ``` - name: "Install Jenkins plugins" jenkins_plugin: owner: jenkins group: jenkins name: "{{ item }}" url: "https://{{ ansible_host }}:8090/" url_username: "{{ ansible_user }}" url_password: "{{ ansible_become_pass }}" force_basic_auth: yes with_items: - ansicolor ``` <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The named plugin is installed in jenkins. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61672
https://github.com/ansible/ansible/pull/61673
dfc023209fc7460c449b2e97feb1b5e6d79d1491
76b5b90bd6fdf4ac3fb785f81c5fab18445db2e2
2019-09-02T09:46:51Z
python
2019-09-10T13:44:37Z
lib/ansible/module_utils/urls.py
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <[email protected]>, 2012-2013 # Copyright (c), Toshio Kuratomi <[email protected]>, 2015 # # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) # # The match_hostname function and supporting code is under the terms and # conditions of the Python Software Foundation License. They were taken from # the Python3 standard library and adapted for use in Python2. See comments in the # source for which code precisely is under this License. # # PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) ''' The **urls** utils module offers a replacement for the urllib2 python library. urllib2 is the python stdlib way to retrieve files from the Internet but it lacks some security features (around verifying SSL certificates) that users should care about in most situations. Using the functions in this module corrects deficiencies in the urllib2 module wherever possible. There are also third-party libraries (for instance, requests) which can be used to replace urllib2 with a more secure library. However, all third party libraries require that the library be installed on the managed machine. That is an extra step for users making use of a module. If possible, avoid third party libraries by using this code instead. 
''' import atexit import base64 import functools import netrc import os import platform import re import socket import sys import tempfile import traceback from contextlib import contextmanager try: import httplib except ImportError: # Python 3 import http.client as httplib import ansible.module_utils.six.moves.http_cookiejar as cookiejar import ansible.module_utils.six.moves.urllib.request as urllib_request import ansible.module_utils.six.moves.urllib.error as urllib_error from ansible.module_utils.six import PY3 from ansible.module_utils.basic import get_distribution from ansible.module_utils._text import to_bytes, to_native, to_text try: # python3 import urllib.request as urllib_request from urllib.request import AbstractHTTPHandler except ImportError: # python2 import urllib2 as urllib_request from urllib2 import AbstractHTTPHandler urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307 try: from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse HAS_URLPARSE = True except Exception: HAS_URLPARSE = False try: import ssl HAS_SSL = True except Exception: HAS_SSL = False try: # SNI Handling needs python2.7.9's SSLContext from ssl import create_default_context, SSLContext HAS_SSLCONTEXT = True except ImportError: HAS_SSLCONTEXT = False # SNI Handling for python < 2.7.9 with urllib3 support try: # urllib3>=1.15 HAS_URLLIB3_SSL_WRAP_SOCKET = False try: from urllib3.contrib.pyopenssl import PyOpenSSLContext except ImportError: from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext HAS_URLLIB3_PYOPENSSLCONTEXT = True except ImportError: # urllib3<1.15,>=1.6 HAS_URLLIB3_PYOPENSSLCONTEXT = False try: try: from urllib3.contrib.pyopenssl import ssl_wrap_socket except ImportError: from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket HAS_URLLIB3_SSL_WRAP_SOCKET = True except ImportError: pass # Select a protocol that includes all secure tls protocols # Exclude insecure 
# ssl protocols if possible.
if HAS_SSL:
    # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
    PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
    try:
        import ctypes
        import ctypes.util
    except ImportError:
        # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
        pass
    else:
        libssl_name = ctypes.util.find_library('ssl')
        libssl = ctypes.CDLL(libssl_name)
        for method in ('TLSv1_1_method', 'TLSv1_2_method'):
            try:
                libssl[method]
                # Found something - we'll let openssl autonegotiate and hope
                # the server has disabled sslv2 and 3.  best we can do.
                PROTOCOL = ssl.PROTOCOL_SSLv23
                break
            except AttributeError:
                pass
        del libssl


# The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname
# The bundled backports.ssl_match_hostname should really be moved into its own file for processing
_BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"}

LOADED_VERIFY_LOCATIONS = set()

HAS_MATCH_HOSTNAME = True
try:
    from ssl import match_hostname, CertificateError
except ImportError:
    try:
        from backports.ssl_match_hostname import match_hostname, CertificateError
    except ImportError:
        HAS_MATCH_HOSTNAME = False

try:
    import urllib_gssapi
    HAS_GSSAPI = True
except ImportError:
    HAS_GSSAPI = False

if not HAS_MATCH_HOSTNAME:
    # The following block of code is under the terms and conditions of the
    # Python Software Foundation License
    """The match_hostname() function from Python 3.4, essential when using SSL."""

    try:
        # Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not
        from _ssl import SSLCertVerificationError
        CertificateError = SSLCertVerificationError
    except ImportError:
        class CertificateError(ValueError):
            pass

    def _dnsname_match(dn, hostname):
        """Matching according to RFC 6125, section 6.4.3

        - Hostnames are compared lower case.
        - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
        - Partial wildcards like 'www*.example.org', multiple wildcards, sole
          wildcard or wildcards in labels other then the left-most label are not
          supported and a CertificateError is raised.
        - A wildcard must match at least one character.
        """
        if not dn:
            return False

        wildcards = dn.count('*')
        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()

        if wildcards > 1:
            # Divergence .format() to percent formatting for Python < 2.6
            raise CertificateError(
                "too many wildcards in certificate DNS name: %s" % repr(dn))

        dn_leftmost, sep, dn_remainder = dn.partition('.')

        if '*' in dn_remainder:
            # Only match wildcard in leftmost segment.
            # Divergence .format() to percent formatting for Python < 2.6
            raise CertificateError(
                "wildcard can only be present in the leftmost label: "
                "%s." % repr(dn))

        if not sep:
            # no right side
            # Divergence .format() to percent formatting for Python < 2.6
            raise CertificateError(
                "sole wildcard without additional labels are not support: "
                "%s." % repr(dn))

        if dn_leftmost != '*':
            # no partial wildcard matching
            # Divergence .format() to percent formatting for Python < 2.6
            raise CertificateError(
                "partial wildcards in leftmost label are not supported: "
                "%s." % repr(dn))

        hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
        if not hostname_leftmost or not sep:
            # wildcard must match at least one char
            return False
        return dn_remainder.lower() == hostname_remainder.lower()

    def _inet_paton(ipname):
        """Try to convert an IP address to packed binary form

        Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
        support.
        """
        # inet_aton() also accepts strings like '1'
        # Divergence: We make sure we have native string type for all python versions
        try:
            b_ipname = to_bytes(ipname, errors='strict')
        except UnicodeError:
            raise ValueError("%s must be an all-ascii string." % repr(ipname))

        # Set ipname in native string format
        if sys.version_info < (3,):
            n_ipname = b_ipname
        else:
            n_ipname = ipname

        if n_ipname.count('.') == 3:
            try:
                return socket.inet_aton(n_ipname)
            # Divergence: OSError on late python3.  socket.error earlier.
            # Null bytes generate ValueError on python3(we want to raise
            # ValueError anyway), TypeError # earlier
            except (OSError, socket.error, TypeError):
                pass

        try:
            return socket.inet_pton(socket.AF_INET6, n_ipname)
        # Divergence: OSError on late python3.  socket.error earlier.
        # Null bytes generate ValueError on python3(we want to raise
        # ValueError anyway), TypeError # earlier
        except (OSError, socket.error, TypeError):
            # Divergence .format() to percent formatting for Python < 2.6
            raise ValueError("%s is neither an IPv4 nor an IP6 "
                             "address." % repr(ipname))
        except AttributeError:
            # AF_INET6 not available
            pass

        # Divergence .format() to percent formatting for Python < 2.6
        raise ValueError("%s is not an IPv4 address." % repr(ipname))

    def _ipaddress_match(ipname, host_ip):
        """Exact matching of IP addresses.

        RFC 6125 explicitly doesn't define an algorithm for this
        (section 1.7.2 - "Out of Scope").
        """
        # OpenSSL may add a trailing newline to a subjectAltName's IP address
        ip = _inet_paton(ipname.rstrip())
        return ip == host_ip

    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
        rules are followed.

        The function matches IP addresses rather than dNSNames if hostname is a
        valid ipaddress string. IPv4 addresses are supported on all platforms.
        IPv6 addresses are supported on platforms with IPv6 support (AF_INET6
        and inet_pton).

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate, match_hostname needs a "
                             "SSL socket or SSL context with either "
                             "CERT_OPTIONAL or CERT_REQUIRED")
        try:
            # Divergence: Deal with hostname as bytes
            host_ip = _inet_paton(to_text(hostname, errors='strict'))
        except UnicodeError:
            # Divergence: Deal with hostname as byte strings.
            # IP addresses should be all ascii, so we consider it not
            # an IP address if this fails
            host_ip = None
        except ValueError:
            # Not an IP address (common case)
            host_ip = None
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if host_ip is None and _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
            elif key == 'IP Address':
                if host_ip is not None and _ipaddress_match(value, host_ip):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or subjectAltName fields were found")

    # End of Python Software Foundation Licensed code

    HAS_MATCH_HOSTNAME = True


# This is a dummy cacert provided for macOS since you need at least 1
# ca cert, regardless of validity, for Python on macOS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE----- MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9 gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1 4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg= -----END CERTIFICATE----- """ # # Exceptions # class ConnectionError(Exception): """Failed to connect to the server""" pass class ProxyError(ConnectionError): """Failure to connect because of a proxy""" pass class SSLValidationError(ConnectionError): """Failure to connect due to SSL validation failing""" pass class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" pass # Some environments (Google Compute Engine's CoreOS deploys) do not compile # against openssl and thus do not have any HTTPS support. 
CustomHTTPSConnection = None CustomHTTPSHandler = None HTTPSClientAuthHandler = None UnixHTTPSConnection = None if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'): class CustomHTTPSConnection(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) self.context = None if HAS_SSLCONTEXT: self.context = self._context elif HAS_URLLIB3_PYOPENSSLCONTEXT: self.context = self._context = PyOpenSSLContext(PROTOCOL) if self.context and self.cert_file: self.context.load_cert_chain(self.cert_file, self.key_file) def connect(self): "Connect to a host on a given (SSL) port." if hasattr(self, 'source_address'): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) server_hostname = self.host # Note: self._tunnel_host is not available on py < 2.6 but this code # isn't used on py < 2.6 (lack of create_connection) if self._tunnel_host: self.sock = sock self._tunnel() server_hostname = self._tunnel_host if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT: self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) elif HAS_URLLIB3_SSL_WRAP_SOCKET: self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL, server_hostname=server_hostname) else: self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) class CustomHTTPSHandler(urllib_request.HTTPSHandler): def https_open(self, req): kwargs = {} if HAS_SSLCONTEXT: kwargs['context'] = self._context return self.do_open( functools.partial( CustomHTTPSConnection, **kwargs ), req ) https_request = AbstractHTTPHandler.do_request_ class HTTPSClientAuthHandler(urllib_request.HTTPSHandler): '''Handles client authentication via cert/key This is a fairly lightweight extension on HTTPSHandler, and can be used in place 
of HTTPSHandler ''' def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs): urllib_request.HTTPSHandler.__init__(self, **kwargs) self.client_cert = client_cert self.client_key = client_key self._unix_socket = unix_socket def https_open(self, req): return self.do_open(self._build_https_connection, req) def _build_https_connection(self, host, **kwargs): kwargs.update({ 'cert_file': self.client_cert, 'key_file': self.client_key, }) try: kwargs['context'] = self._context except AttributeError: pass if self._unix_socket: return UnixHTTPSConnection(self._unix_socket)(host, **kwargs) return httplib.HTTPSConnection(host, **kwargs) @contextmanager def unix_socket_patch_httpconnection_connect(): '''Monkey patch ``httplib.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect`` so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the correct behavior of creating self.sock for the unix socket ''' _connect = httplib.HTTPConnection.connect httplib.HTTPConnection.connect = UnixHTTPConnection.connect yield httplib.HTTPConnection.connect = _connect class UnixHTTPSConnection(httplib.HTTPSConnection): def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): # This method exists simply to ensure we monkeypatch # httplib.HTTPConnection.connect to call UnixHTTPConnection.connect with unix_socket_patch_httpconnection_connect(): # Disable pylint check for the super() call. 
It complains about UnixHTTPSConnection # being a NoneType because of the initial definition above, but it won't actually # be a NoneType when this code runs # pylint: disable=bad-super-call super(UnixHTTPSConnection, self).connect() def __call__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) return self class UnixHTTPConnection(httplib.HTTPConnection): '''Handles http requests to a unix socket file''' def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: self.sock.connect(self._unix_socket) except OSError as e: raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e)) if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: self.sock.settimeout(self.timeout) def __call__(self, *args, **kwargs): httplib.HTTPConnection.__init__(self, *args, **kwargs) return self class UnixHTTPHandler(urllib_request.HTTPHandler): '''Handler for Unix urls''' def __init__(self, unix_socket, **kwargs): urllib_request.HTTPHandler.__init__(self, **kwargs) self._unix_socket = unix_socket def http_open(self, req): return self.do_open(UnixHTTPConnection(self._unix_socket), req) class ParseResultDottedDict(dict): ''' A dict that acts similarly to the ParseResult named tuple from urllib ''' def __init__(self, *args, **kwargs): super(ParseResultDottedDict, self).__init__(*args, **kwargs) self.__dict__ = self def as_list(self): ''' Generate a list from this dict, that looks like the ParseResult named tuple ''' return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')] def generic_urlparse(parts): ''' Returns a dictionary of url parts as parsed by urlparse, but accounts for the fact that older versions of that library do not support named attributes (ie. 
.netloc) ''' generic_parts = ParseResultDottedDict() if hasattr(parts, 'netloc'): # urlparse is newer, just read the fields straight # from the parts object generic_parts['scheme'] = parts.scheme generic_parts['netloc'] = parts.netloc generic_parts['path'] = parts.path generic_parts['params'] = parts.params generic_parts['query'] = parts.query generic_parts['fragment'] = parts.fragment generic_parts['username'] = parts.username generic_parts['password'] = parts.password hostname = parts.hostname if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc: # Py2.6 doesn't parse IPv6 addresses correctly hostname = parts.netloc.split(']')[0][1:].lower() generic_parts['hostname'] = hostname try: port = parts.port except ValueError: # Py2.6 doesn't parse IPv6 addresses correctly netloc = parts.netloc.split('@')[-1].split(']')[-1] if ':' in netloc: port = netloc.split(':')[1] if port: port = int(port) else: port = None generic_parts['port'] = port else: # we have to use indexes, and then parse out # the other parts not supported by indexing generic_parts['scheme'] = parts[0] generic_parts['netloc'] = parts[1] generic_parts['path'] = parts[2] generic_parts['params'] = parts[3] generic_parts['query'] = parts[4] generic_parts['fragment'] = parts[5] # get the username, password, etc. 
try: netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$') match = netloc_re.match(parts[1]) auth = match.group(1) hostname = match.group(2) port = match.group(3) if port: # the capture group for the port will include the ':', # so remove it and convert the port to an integer port = int(port[1:]) if auth: # the capture group above includes the @, so remove it # and then split it up based on the first ':' found auth = auth[:-1] username, password = auth.split(':', 1) else: username = password = None generic_parts['username'] = username generic_parts['password'] = password generic_parts['hostname'] = hostname generic_parts['port'] = port except Exception: generic_parts['username'] = None generic_parts['password'] = None generic_parts['hostname'] = parts[1] generic_parts['port'] = None return generic_parts class RequestWithMethod(urllib_request.Request): ''' Workaround for using DELETE/PUT/etc with urllib2 Originally contained in library/net_infrastructure/dnsmadeeasy ''' def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True): if headers is None: headers = {} self._method = method.upper() urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable) def get_method(self): if self._method: return self._method else: return urllib_request.Request.get_method(self) def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None): """This is a class factory that closes over the value of ``follow_redirects`` so that the RedirectHandler class has access to that value without having to use globals, and potentially cause problems where ``open_url`` or ``fetch_url`` are used multiple times in a module. """ class RedirectHandler(urllib_request.HTTPRedirectHandler): """This is an implementation of a RedirectHandler to match the functionality provided by httplib2. 
It will utilize the value of ``follow_redirects`` that is passed into ``RedirectHandlerFactory`` to determine how redirects should be handled in urllib2. """ def redirect_request(self, req, fp, code, msg, hdrs, newurl): if not HAS_SSLCONTEXT: handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path) if handler: urllib_request._opener.add_handler(handler) # Preserve urllib2 compatibility if follow_redirects == 'urllib2': return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl) # Handle disabled redirects elif follow_redirects in ['no', 'none', False]: raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp) method = req.get_method() # Handle non-redirect HTTP status or invalid follow_redirects if follow_redirects in ['all', 'yes', True]: if code < 300 or code >= 400: raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp) elif follow_redirects == 'safe': if code < 300 or code >= 400 or method not in ('GET', 'HEAD'): raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp) else: raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp) try: # Python 2-3.3 data = req.get_data() origin_req_host = req.get_origin_req_host() except AttributeError: # Python 3.4+ data = req.data origin_req_host = req.origin_req_host # Be conciliant with URIs containing a space newurl = newurl.replace(' ', '%20') # Suport redirect with payload and original headers if code in (307, 308): # Preserve payload and headers headers = req.headers else: # Do not preserve payload and filter headers data = None headers = dict((k, v) for k, v in req.headers.items() if k.lower() not in ("content-length", "content-type", "transfer-encoding")) # http://tools.ietf.org/html/rfc7231#section-6.4.4 if code == 303 and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. 
if code == 302 and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. if code == 301 and method == 'POST': method = 'GET' return RequestWithMethod(newurl, method=method, headers=headers, data=data, origin_req_host=origin_req_host, unverifiable=True, ) return RedirectHandler def build_ssl_validation_error(hostname, port, paths, exc=None): '''Inteligently build out the SSLValidationError based on what support you have installed ''' msg = [ ('Failed to validate the SSL certificate for %s:%s.' ' Make sure your managed systems have a valid CA' ' certificate installed.') ] if not HAS_SSLCONTEXT: msg.append('If the website serving the url uses SNI you need' ' python >= 2.7.9 on your managed machine') msg.append(' (the python executable used (%s) is version: %s)' % (sys.executable, ''.join(sys.version.splitlines()))) if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET: msg.append('or you can install the `urllib3`, `pyOpenSSL`,' ' `ndg-httpsclient`, and `pyasn1` python modules') msg.append('to perform SNI verification in python >= 2.6.') msg.append('You can use validate_certs=False if you do' ' not need to confirm the servers identity but this is' ' unsafe and not recommended.' ' Paths checked for this platform: %s.') if exc: msg.append('The exception msg was: %s.' % to_native(exc)) raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths))) def atexit_remove_file(filename): if os.path.exists(filename): try: os.unlink(filename) except Exception: # just ignore if we cannot delete, things should be ok pass class SSLValidationHandler(urllib_request.BaseHandler): ''' A custom handler class for SSL validation. 
Based on: http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python http://techknack.net/python-urllib2-handlers/ ''' CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n" def __init__(self, hostname, port, ca_path=None): self.hostname = hostname self.port = port self.ca_path = ca_path def get_ca_certs(self): # tries to find a valid CA cert in one of the # standard locations for the current distribution ca_certs = [] cadata = bytearray() paths_checked = [] if self.ca_path: paths_checked = [self.ca_path] with open(to_bytes(self.ca_path, errors='surrogate_or_strict'), 'rb') as f: if HAS_SSLCONTEXT: cadata.extend( ssl.PEM_cert_to_DER_cert( to_native(f.read(), errors='surrogate_or_strict') ) ) else: ca_certs.append(f.read()) return ca_certs, cadata, paths_checked if not HAS_SSLCONTEXT: paths_checked.append('/etc/ssl/certs') system = to_text(platform.system(), errors='surrogate_or_strict') # build a list of paths to check for .crt/.pem files # based on the platform type if system == u'Linux': paths_checked.append('/etc/pki/ca-trust/extracted/pem') paths_checked.append('/etc/pki/tls/certs') paths_checked.append('/usr/share/ca-certificates/cacert.org') elif system == u'FreeBSD': paths_checked.append('/usr/local/share/certs') elif system == u'OpenBSD': paths_checked.append('/etc/ssl') elif system == u'NetBSD': ca_certs.append('/etc/openssl/certs') elif system == u'SunOS': paths_checked.append('/opt/local/etc/openssl/certs') # fall back to a user-deployed cert in a standard # location if the OS platform one is not available paths_checked.append('/etc/ansible') tmp_path = None if not HAS_SSLCONTEXT: tmp_fd, tmp_path = tempfile.mkstemp() atexit.register(atexit_remove_file, tmp_path) # Write the dummy ca cert if we are running on macOS if system == u'Darwin': if HAS_SSLCONTEXT: cadata.extend( ssl.PEM_cert_to_DER_cert( to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict') ) ) else: os.write(tmp_fd, b_DUMMY_CA_CERT) # Default Homebrew path for OpenSSL certs 
paths_checked.append('/usr/local/etc/openssl') # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use # in the ssl check to speed up the test for path in paths_checked: if os.path.exists(path) and os.path.isdir(path): dir_contents = os.listdir(path) for f in dir_contents: full_path = os.path.join(path, f) if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'): try: if full_path not in LOADED_VERIFY_LOCATIONS: with open(full_path, 'rb') as cert_file: b_cert = cert_file.read() if HAS_SSLCONTEXT: try: cadata.extend( ssl.PEM_cert_to_DER_cert( to_native(b_cert, errors='surrogate_or_strict') ) ) except Exception: continue else: os.write(tmp_fd, b_cert) os.write(tmp_fd, b'\n') except (OSError, IOError): pass if HAS_SSLCONTEXT: default_verify_paths = ssl.get_default_verify_paths() paths_checked[:0] = [default_verify_paths.capath] return (tmp_path, cadata, paths_checked) def validate_proxy_response(self, response, valid_codes=None): ''' make sure we get back a valid code from the proxy ''' valid_codes = [200] if valid_codes is None else valid_codes try: (http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups() if int(resp_code) not in valid_codes: raise Exception except Exception: raise ProxyError('Connection to proxy failed') def detect_no_proxy(self, url): ''' Detect if the 'no_proxy' environment variable is set and honor those locations. 
''' env_no_proxy = os.environ.get('no_proxy') if env_no_proxy: env_no_proxy = env_no_proxy.split(',') netloc = urlparse(url).netloc for host in env_no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # Our requested URL matches something in no_proxy, so don't # use the proxy for this return False return True def make_context(self, cafile, cadata): cafile = self.ca_path or cafile if self.ca_path: cadata = None else: cadata = cadata or None if HAS_SSLCONTEXT: context = create_default_context(cafile=cafile) elif HAS_URLLIB3_PYOPENSSLCONTEXT: context = PyOpenSSLContext(PROTOCOL) else: raise NotImplementedError('Host libraries are too old to support creating an sslcontext') if cafile or cadata: context.load_verify_locations(cafile=cafile, cadata=cadata) return context def http_request(self, req): tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs() # Detect if 'no_proxy' environment variable is set and if our URL is included use_proxy = self.detect_no_proxy(req.get_full_url()) https_proxy = os.environ.get('https_proxy') context = None try: context = self.make_context(tmp_ca_cert_path, cadata) except NotImplementedError: # We'll make do with no context below pass try: if use_proxy and https_proxy: proxy_parts = generic_urlparse(urlparse(https_proxy)) port = proxy_parts.get('port') or 443 proxy_hostname = proxy_parts.get('hostname', None) if proxy_hostname is None or proxy_parts.get('scheme') == '': raise ProxyError("Failed to parse https_proxy environment variable." 
" Please make sure you export https proxy as 'https_proxy=<SCHEME>://<IP_ADDRESS>:<PORT>'") s = socket.create_connection((proxy_hostname, port)) if proxy_parts.get('scheme') == 'http': s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict')) if proxy_parts.get('username'): credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', '')) s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip()) s.sendall(b'\r\n') connect_result = b"" while connect_result.find(b"\r\n\r\n") <= 0: connect_result += s.recv(4096) # 128 kilobytes of headers should be enough for everyone. if len(connect_result) > 131072: raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.') self.validate_proxy_response(connect_result) if context: ssl_s = context.wrap_socket(s, server_hostname=self.hostname) elif HAS_URLLIB3_SSL_WRAP_SOCKET: ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) else: raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' 
% proxy_parts.get('scheme')) else: s = socket.create_connection((self.hostname, self.port)) if context: ssl_s = context.wrap_socket(s, server_hostname=self.hostname) elif HAS_URLLIB3_SSL_WRAP_SOCKET: ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection # ssl_s.unwrap() s.close() except (ssl.SSLError, CertificateError) as e: build_ssl_validation_error(self.hostname, self.port, paths_checked, e) except socket.error as e: raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e))) return req https_request = http_request def maybe_add_ssl_handler(url, validate_certs, ca_path=None): parsed = generic_urlparse(urlparse(url)) if parsed.scheme == 'https' and validate_certs: if not HAS_SSL: raise NoSSLError('SSL validation is not available in your version of python. 
You can use validate_certs=False,' ' however this is unsafe and not recommended') # create the SSL validation handler and # add it to the list of handlers return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path) def rfc2822_date_string(timetuple, zone='-0000'): """Accepts a timetuple and optional zone which defaults to ``-0000`` and returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Copied from email.utils.formatdate and modified for separate use """ return '%s, %02d %s %04d %02d:%02d:%02d %s' % ( ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]], timetuple[2], ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1], timetuple[0], timetuple[3], timetuple[4], timetuple[5], zone) class Request: def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None, ca_path=None): """This class works somewhat similarly to the ``Session`` class of from requests by defining a cookiejar that an be used across requests as well as cascaded defaults that can apply to repeated requests For documentation of params, see ``Request.open`` >>> from ansible.module_utils.urls import Request >>> r = Request() >>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read() '{\n "cookies": {\n "k1": "v1"\n }\n}\n' >>> r = Request(url_username='user', url_password='passwd') >>> r.open('GET', 'http://httpbin.org/basic-auth/user/passwd').read() '{\n "authenticated": true, \n "user": "user"\n}\n' >>> r = Request(headers=dict(foo='bar')) >>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read() """ self.headers = headers or {} if not isinstance(self.headers, dict): raise ValueError("headers must be a dict: %r" % self.headers) self.use_proxy = use_proxy 
self.force = force self.timeout = timeout self.validate_certs = validate_certs self.url_username = url_username self.url_password = url_password self.http_agent = http_agent self.force_basic_auth = force_basic_auth self.follow_redirects = follow_redirects self.client_cert = client_cert self.client_key = client_key self.unix_socket = unix_socket self.ca_path = ca_path if isinstance(cookies, cookiejar.CookieJar): self.cookies = cookies else: self.cookies = cookiejar.CookieJar() def _fallback(self, value, fallback): if value is None: return fallback return value def open(self, method, url, data=None, headers=None, use_proxy=None, force=None, last_mod_time=None, timeout=None, validate_certs=None, url_username=None, url_password=None, http_agent=None, force_basic_auth=None, follow_redirects=None, client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None): """ Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3) Does not require the module environment Returns :class:`HTTPResponse` object. 
:arg method: method for the request :arg url: URL to request :kwarg data: (optional) bytes, or file-like object to send in the body of the request :kwarg headers: (optional) Dictionary of HTTP Headers to send with the request :kwarg use_proxy: (optional) Boolean of whether or not to use proxy :kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header :kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header :kwarg timeout: (optional) How long to wait for the server to send data before giving up, as a float :kwarg validate_certs: (optional) Booleani that controls whether we verify the server's TLS certificate :kwarg url_username: (optional) String of the user to use when authenticating :kwarg url_password: (optional) String of the password to use when authenticating :kwarg http_agent: (optional) String of the User-Agent to use in the request :kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request :kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are followed, see RedirectHandlerFactory for more information :kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication. This file can also include the key as well, and if the key is included, client_key is not required :kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client authentication. If client_cert contains both the certificate and key, this option is not required :kwarg cookies: (optional) CookieJar object to send with the request :kwarg use_gssapi: (optional) Use GSSAPI handler of requests. 
:kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing connection to the provided url :kwarg ca_path: (optional) String of file system path to CA cert bundle to use :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request :returns: HTTPResponse. Added in Ansible 2.9 """ method = method.upper() if headers is None: headers = {} elif not isinstance(headers, dict): raise ValueError("headers must be a dict") headers = dict(self.headers, **headers) use_proxy = self._fallback(use_proxy, self.use_proxy) force = self._fallback(force, self.force) timeout = self._fallback(timeout, self.timeout) validate_certs = self._fallback(validate_certs, self.validate_certs) url_username = self._fallback(url_username, self.url_username) url_password = self._fallback(url_password, self.url_password) http_agent = self._fallback(http_agent, self.http_agent) force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth) follow_redirects = self._fallback(follow_redirects, self.follow_redirects) client_cert = self._fallback(client_cert, self.client_cert) client_key = self._fallback(client_key, self.client_key) cookies = self._fallback(cookies, self.cookies) unix_socket = self._fallback(unix_socket, self.unix_socket) ca_path = self._fallback(ca_path, self.ca_path) handlers = [] if unix_socket: handlers.append(UnixHTTPHandler(unix_socket)) ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path) if ssl_handler and not HAS_SSLCONTEXT: handlers.append(ssl_handler) if HAS_GSSAPI and use_gssapi: handlers.append(urllib_gssapi.HTTPSPNEGOAuthHandler()) parsed = generic_urlparse(urlparse(url)) if parsed.scheme != 'ftp': username = url_username if username: password = url_password netloc = parsed.netloc elif '@' in parsed.netloc: credentials, netloc = parsed.netloc.split('@', 1) if ':' in credentials: username, password = credentials.split(':', 1) else: username = credentials password = '' 
parsed_list = parsed.as_list() parsed_list[1] = netloc # reconstruct url without credentials url = urlunparse(parsed_list) if username and not force_basic_auth: passman = urllib_request.HTTPPasswordMgrWithDefaultRealm() # this creates a password manager passman.add_password(None, netloc, username, password) # because we have put None at the start it will always # use this username/password combination for urls # for which `theurl` is a super-url authhandler = urllib_request.HTTPBasicAuthHandler(passman) digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman) # create the AuthHandler handlers.append(authhandler) handlers.append(digest_authhandler) elif username and force_basic_auth: headers["Authorization"] = basic_auth_header(username, password) else: try: rc = netrc.netrc(os.environ.get('NETRC')) login = rc.authenticators(parsed.hostname) except IOError: login = None if login: username, _, password = login if username and password: headers["Authorization"] = basic_auth_header(username, password) if not use_proxy: proxyhandler = urllib_request.ProxyHandler({}) handlers.append(proxyhandler) context = None if HAS_SSLCONTEXT and not validate_certs: # In 2.7.9, the default context validates certificates context = SSLContext(ssl.PROTOCOL_SSLv23) if ssl.OP_NO_SSLv2: context.options |= ssl.OP_NO_SSLv2 context.options |= ssl.OP_NO_SSLv3 context.verify_mode = ssl.CERT_NONE context.check_hostname = False handlers.append(HTTPSClientAuthHandler(client_cert=client_cert, client_key=client_key, context=context, unix_socket=unix_socket)) elif client_cert or unix_socket: handlers.append(HTTPSClientAuthHandler(client_cert=client_cert, client_key=client_key, unix_socket=unix_socket)) if ssl_handler and HAS_SSLCONTEXT and validate_certs: tmp_ca_path, cadata, paths_checked = ssl_handler.get_ca_certs() try: context = ssl_handler.make_context(tmp_ca_path, cadata) except NotImplementedError: pass # pre-2.6 versions of python cannot use the custom https # handler, since the 
socket class is lacking create_connection. # Some python builds lack HTTPS support. if hasattr(socket, 'create_connection') and CustomHTTPSHandler: kwargs = {} if HAS_SSLCONTEXT: kwargs['context'] = context handlers.append(CustomHTTPSHandler(**kwargs)) handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path)) # add some nicer cookie handling if cookies is not None: handlers.append(urllib_request.HTTPCookieProcessor(cookies)) opener = urllib_request.build_opener(*handlers) urllib_request.install_opener(opener) data = to_bytes(data, nonstring='passthru') request = RequestWithMethod(url, method, data) # add the custom agent header, to help prevent issues # with sites that block the default urllib agent string if http_agent: request.add_header('User-agent', http_agent) # Cache control # Either we directly force a cache refresh if force: request.add_header('cache-control', 'no-cache') # or we do it if the original is more recent than our copy elif last_mod_time: tstamp = rfc2822_date_string(last_mod_time.timetuple()) request.add_header('If-Modified-Since', tstamp) # user defined headers now, which may override things we've set above unredirected_headers = unredirected_headers or [] for header in headers: if header in unredirected_headers: request.add_unredirected_header(header, headers[header]) else: request.add_header(header, headers[header]) urlopen_args = [request, None] if sys.version_info >= (2, 6, 0): # urlopen in python prior to 2.6.0 did not # have a timeout parameter urlopen_args.append(timeout) r = urllib_request.urlopen(*urlopen_args) return r def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`HTTPResponse` object. 
:arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('HEAD', url, **kwargs) def post(self, url, data=None, **kwargs): r"""Sends a POST request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('POST', url, data=data, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`HTTPResponse` object. :arg url: URL to request. :kwarg data: (optional) bytes, or file-like object to send in the body of the request. :kwarg \*\*kwargs: Optional arguments that ``open`` takes. :returns: HTTPResponse """ return self.open('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`HTTPResponse` object. :arg url: URL to request :kwargs \*\*kwargs: Optional arguments that ``open`` takes. 
:returns: HTTPResponse """ return self.open('DELETE', url, **kwargs) def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None): ''' Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3) Does not require the module environment ''' method = method or ('POST' if data else 'GET') return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent, force_basic_auth=force_basic_auth, follow_redirects=follow_redirects, client_cert=client_cert, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers) # # Module-related functions # def basic_auth_header(username, password): """Takes a username and password and returns a byte string suitable for using as value of an Authorization header to do basic auth. 
""" return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict')) def url_argument_spec(): ''' Creates an argument spec that can be used with any module that will be requesting content via urllib/urllib2 ''' return dict( url=dict(type='str'), force=dict(type='bool', default=False, aliases=['thirsty'], deprecated_aliases=[dict(name='thirsty', version='2.13')]), http_agent=dict(type='str', default='ansible-httpget'), use_proxy=dict(type='bool', default=True), validate_certs=dict(type='bool', default=True), url_username=dict(type='str'), url_password=dict(type='str', no_log=True), force_basic_auth=dict(type='bool', default=False), client_cert=dict(type='path'), client_key=dict(type='path'), ) def fetch_url(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, use_gssapi=False, unix_socket=None, ca_path=None): """Sends a request via HTTP(S) or FTP (needs the module as parameter) :arg module: The AnsibleModule (used to get username, password etc. (s.b.). :arg url: The url to use. :kwarg data: The data to be sent (in case of POST/PUT). :kwarg headers: A dict with the request headers. :kwarg method: "POST", "PUT", etc. :kwarg boolean use_proxy: Default: True :kwarg boolean force: If True: Do not get a cached copy (Default: False) :kwarg last_mod_time: Default: None :kwarg int timeout: Default: 10 :kwarg boolean use_gssapi: Default: False :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing connection to the provided url :kwarg ca_path: (optional) String of file system path to CA cert bundle to use :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data. The **info** contains the 'status' and other meta data. 
When a HttpError (status > 400) occurred then ``info['body']`` contains the error response data:: Example:: data={...} resp, info = fetch_url(module, "http://example.com", data=module.jsonify(data), headers={'Content-type': 'application/json'}, method="POST") status_code = info["status"] body = resp.read() if status_code >= 400 : body = info['body'] """ if not HAS_URLPARSE: module.fail_json(msg='urlparse is not installed') # ensure we use proper tempdir old_tempdir = tempfile.tempdir tempfile.tempdir = module.tmpdir # Get validate_certs from the module params validate_certs = module.params.get('validate_certs', True) username = module.params.get('url_username', '') password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', 'ansible-httpget') force_basic_auth = module.params.get('force_basic_auth', '') follow_redirects = module.params.get('follow_redirects', 'urllib2') client_cert = module.params.get('client_cert') client_key = module.params.get('client_key') cookies = cookiejar.LWPCookieJar() r = None info = dict(url=url, status=-1) try: r = open_url(url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=username, url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth, follow_redirects=follow_redirects, client_cert=client_cert, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path) # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable info.update(dict((k.lower(), v) for k, v in r.info().items())) # Don't be lossy, append header values for duplicate headers # In Py2 there is nothing that needs done, py2 does this for us if PY3: temp_headers = {} for name, value in r.headers.items(): # The same as above, lower case keys to match py2 behavior, and create more consistent results name = name.lower() if name in 
temp_headers: temp_headers[name] = ', '.join((temp_headers[name], value)) else: temp_headers[name] = value info.update(temp_headers) # parse the cookies into a nice dictionary cookie_list = [] cookie_dict = dict() # Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs`` # Cookies with the same path are reversed from response order. # This code makes no assumptions about that, and accepts the order given by python for cookie in cookies: cookie_dict[cookie.name] = cookie.value cookie_list.append((cookie.name, cookie.value)) info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list) info['cookies'] = cookie_dict # finally update the result with a message about the fetch info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code)) except NoSSLError as e: distribution = get_distribution() if distribution is not None and distribution.lower() == 'redhat': module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e), **info) else: module.fail_json(msg='%s' % to_native(e), **info) except (ConnectionError, ValueError) as e: module.fail_json(msg=to_native(e), **info) except urllib_error.HTTPError as e: try: body = e.read() except AttributeError: body = '' # Try to add exception info to the output but don't fail if we can't try: # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable info.update(dict((k.lower(), v) for k, v in e.info().items())) except Exception: pass info.update({'msg': to_native(e), 'body': body, 'status': e.code}) except urllib_error.URLError as e: code = int(getattr(e, 'code', -1)) info.update(dict(msg="Request failed: %s" % to_native(e), status=code)) except socket.error as e: info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1)) except httplib.BadStatusLine as e: info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % 
to_native(e.line), status=-1)) except Exception as e: info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1), exception=traceback.format_exc()) finally: tempfile.tempdir = old_tempdir return r, info def fetch_file(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10): '''Download and save a file via HTTP(S) or FTP (needs the module as parameter). This is basically a wrapper around fetch_url(). :arg module: The AnsibleModule (used to get username, password etc. (s.b.). :arg url: The url to use. :kwarg data: The data to be sent (in case of POST/PUT). :kwarg headers: A dict with the request headers. :kwarg method: "POST", "PUT", etc. :kwarg boolean use_proxy: Default: True :kwarg boolean force: If True: Do not get a cached copy (Default: False) :kwarg last_mod_time: Default: None :kwarg int timeout: Default: 10 :returns: A string, the path to the downloaded file. ''' # download file bufsize = 65536 file_name, file_ext = os.path.splitext(str(url.rsplit('/', 1)[1])) fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_name, suffix=file_ext, delete=False) module.add_cleanup_file(fetch_temp_file.name) try: rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout) if not rsp: module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg'])) data = rsp.read(bufsize) while data: fetch_temp_file.write(data) data = rsp.read(bufsize) fetch_temp_file.close() except Exception as e: module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e))) return fetch_temp_file.name
closed
ansible/ansible
https://github.com/ansible/ansible
61,672
jenkins_plugin: use of authorization crumb must be conducted in a session since LTS 2.176
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Plugins cannot be installed using the jenkins_plugin module since upgrading to LTS 2.176. This is due to additional CSRF security where the crumb that is issued may only be used within the same session. In the jenkins log messages like this are reported: ``` WARNING: Found invalid crumb 5cae269d58b83057dfbd85f6c32f0845. Will check remaining parameters for a valid one... ``` The module will then error with `Cannot install plugin.` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME jenkins_plugin ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.8.4 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/james/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/dist-packages/ansible executable location = /usr/bin/ansible python version = 2.7.16 (default, Apr 6 2019, 01:42:57) [GCC 8.3.0] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below (no output) ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Install jenkins 2.176, attempt to install plugins using ansible, e.g. 
a task such as: ``` - name: "Install Jenkins plugins" jenkins_plugin: owner: jenkins group: jenkins name: "{{ item }}" url: "https://{{ ansible_host }}:8090/" url_username: "{{ ansible_user }}" url_password: "{{ ansible_become_pass }}" force_basic_auth: yes with_items: - ansicolor ``` <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The named plugin is installed in jenkins. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61672
https://github.com/ansible/ansible/pull/61673
dfc023209fc7460c449b2e97feb1b5e6d79d1491
76b5b90bd6fdf4ac3fb785f81c5fab18445db2e2
2019-09-02T09:46:51Z
python
2019-09-10T13:44:37Z
lib/ansible/modules/web_infrastructure/jenkins_plugin.py
#!/usr/bin/python # encoding: utf-8 # (c) 2016, Jiri Tyr <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: jenkins_plugin author: Jiri Tyr (@jtyr) version_added: '2.2' short_description: Add or remove Jenkins plugin description: - Ansible module which helps to manage Jenkins plugins. options: group: description: - Name of the Jenkins group on the OS. default: jenkins jenkins_home: description: - Home directory of the Jenkins user. default: /var/lib/jenkins mode: description: - File mode applied on versioned plugins. default: '0644' name: description: - Plugin name. required: yes owner: description: - Name of the Jenkins user on the OS. default: jenkins state: description: - Desired plugin state. - If the C(latest) is set, the check for new version will be performed every time. This is suitable to keep the plugin up-to-date. choices: [absent, present, pinned, unpinned, enabled, disabled, latest] default: present timeout: description: - Server connection timeout in secs. default: 30 updates_expiration: description: - Number of seconds after which a new copy of the I(update-center.json) file is downloaded. This is used to avoid the need to download the plugin to calculate its checksum when C(latest) is specified. - Set it to C(0) if no cache file should be used. In that case, the plugin file will always be downloaded to calculate its checksum when C(latest) is specified. default: 86400 updates_url: description: - URL of the Update Centre. - Used as the base URL to download the plugins and the I(update-center.json) JSON file. default: https://updates.jenkins.io url: description: - URL of the Jenkins server. default: http://localhost:8080 version: description: - Plugin version number. 
- If this option is specified, all plugin dependencies must be installed manually. - It might take longer to verify that the correct version is installed. This is especially true if a specific version number is specified. - Quote the version to prevent the value to be interpreted as float. For example if C(1.20) would be unquoted, it would become C(1.2). with_dependencies: description: - Defines whether to install plugin dependencies. - This option takes effect only if the I(version) is not defined. type: bool default: yes notes: - Plugin installation should be run under root or the same user which owns the plugin files on the disk. Only if the plugin is not installed yet and no version is specified, the API installation is performed which requires only the Web UI credentials. - It's necessary to notify the handler or call the I(service) module to restart the Jenkins service after a new plugin was installed. - Pinning works only if the plugin is installed and Jenkis service was successfully restarted after the plugin installation. - It is not possible to run the module remotely by changing the I(url) parameter to point to the Jenkins server. The module must be used on the host where Jenkins runs as it needs direct access to the plugin files. 
- "The C(params) option was removed in Ansible 2.5 due to circumventing Ansible's option handling" extends_documentation_fragment: - url ''' EXAMPLES = ''' - name: Install plugin jenkins_plugin: name: build-pipeline-plugin - name: Install plugin without its dependencies jenkins_plugin: name: build-pipeline-plugin with_dependencies: no - name: Make sure the plugin is always up-to-date jenkins_plugin: name: token-macro state: latest - name: Install specific version of the plugin jenkins_plugin: name: token-macro version: "1.15" - name: Pin the plugin jenkins_plugin: name: token-macro state: pinned - name: Unpin the plugin jenkins_plugin: name: token-macro state: unpinned - name: Enable the plugin jenkins_plugin: name: token-macro state: enabled - name: Disable the plugin jenkins_plugin: name: token-macro state: disabled - name: Uninstall plugin jenkins_plugin: name: build-pipeline-plugin state: absent # # Example of how to authenticate # - name: Install plugin jenkins_plugin: name: build-pipeline-plugin url_username: admin url_password: p4ssw0rd url: http://localhost:8888 # # Example of a Play which handles Jenkins restarts during the state changes # - name: Jenkins Master play hosts: jenkins-master vars: my_jenkins_plugins: token-macro: enabled: yes build-pipeline-plugin: version: "1.4.9" pinned: no enabled: yes tasks: - name: Install plugins without a specific version jenkins_plugin: name: "{{ item.key }}" register: my_jenkins_plugin_unversioned when: > 'version' not in item.value with_dict: "{{ my_jenkins_plugins }}" - name: Install plugins with a specific version jenkins_plugin: name: "{{ item.key }}" version: "{{ item.value['version'] }}" register: my_jenkins_plugin_versioned when: > 'version' in item.value with_dict: "{{ my_jenkins_plugins }}" - name: Initiate the fact set_fact: jenkins_restart_required: no - name: Check if restart is required by any of the versioned plugins set_fact: jenkins_restart_required: yes when: item.changed with_items: "{{ 
my_jenkins_plugin_versioned.results }}" - name: Check if restart is required by any of the unversioned plugins set_fact: jenkins_restart_required: yes when: item.changed with_items: "{{ my_jenkins_plugin_unversioned.results }}" - name: Restart Jenkins if required service: name: jenkins state: restarted when: jenkins_restart_required - name: Wait for Jenkins to start up uri: url: http://localhost:8080 status_code: 200 timeout: 5 register: jenkins_service_status # Keep trying for 5 mins in 5 sec intervals retries: 60 delay: 5 until: > 'status' in jenkins_service_status and jenkins_service_status['status'] == 200 when: jenkins_restart_required - name: Reset the fact set_fact: jenkins_restart_required: no when: jenkins_restart_required - name: Plugin pinning jenkins_plugin: name: "{{ item.key }}" state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" when: > 'pinned' in item.value with_dict: "{{ my_jenkins_plugins }}" - name: Plugin enabling jenkins_plugin: name: "{{ item.key }}" state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}" when: > 'enabled' in item.value with_dict: "{{ my_jenkins_plugins }}" ''' RETURN = ''' plugin: description: plugin name returned: success type: str sample: build-pipeline-plugin state: description: state of the target, after execution returned: success type: str sample: "present" ''' from ansible.module_utils.basic import AnsibleModule, to_bytes from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url, url_argument_spec from ansible.module_utils._text import to_native, text_type, binary_type import base64 import hashlib import json import os import tempfile import time class JenkinsPlugin(object): def __init__(self, module): # To be able to call fail_json self.module = module # Shortcuts for the params self.params = self.module.params self.url = self.params['url'] self.timeout = self.params['timeout'] # Crumb self.crumb = {} if self._csrf_enabled(): self.crumb = 
self._get_crumb() # Get list of installed plugins self._get_installed_plugins() def _csrf_enabled(self): csrf_data = self._get_json_data( "%s/%s" % (self.url, "api/json"), 'CSRF') if 'useCrumbs' not in csrf_data: self.module.fail_json( msg="Required fields not found in the Crumbs response.", details=csrf_data) return csrf_data['useCrumbs'] def _get_json_data(self, url, what, **kwargs): # Get the JSON data r = self._get_url_data(url, what, **kwargs) # Parse the JSON data try: json_data = json.loads(to_native(r.read())) except Exception as e: self.module.fail_json( msg="Cannot parse %s JSON data." % what, details=to_native(e)) return json_data def _get_url_data( self, url, what=None, msg_status=None, msg_exception=None, **kwargs): # Compose default messages if msg_status is None: msg_status = "Cannot get %s" % what if msg_exception is None: msg_exception = "Retrieval of %s failed." % what # Get the URL data try: response, info = fetch_url( self.module, url, timeout=self.timeout, **kwargs) if info['status'] != 200: self.module.fail_json(msg=msg_status, details=info['msg']) except Exception as e: self.module.fail_json(msg=msg_exception, details=to_native(e)) return response def _get_crumb(self): crumb_data = self._get_json_data( "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb') if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data: ret = { crumb_data['crumbRequestField']: crumb_data['crumb'] } else: self.module.fail_json( msg="Required fields not found in the Crum response.", details=crumb_data) return ret def _get_installed_plugins(self): plugins_data = self._get_json_data( "%s/%s" % (self.url, "pluginManager/api/json?depth=1"), 'list of plugins') # Check if we got valid data if 'plugins' not in plugins_data: self.module.fail_json(msg="No valid plugin data found.") # Create final list of installed/pined plugins self.is_installed = False self.is_pinned = False self.is_enabled = False for p in plugins_data['plugins']: if p['shortName'] == 
self.params['name']: self.is_installed = True if p['pinned']: self.is_pinned = True if p['enabled']: self.is_enabled = True break def install(self): changed = False plugin_file = ( '%s/plugins/%s.jpi' % ( self.params['jenkins_home'], self.params['name'])) if not self.is_installed and self.params['version'] in [None, 'latest']: if not self.module.check_mode: # Install the plugin (with dependencies) install_script = ( 'd = Jenkins.instance.updateCenter.getPlugin("%s")' '.deploy(); d.get();' % self.params['name']) if self.params['with_dependencies']: install_script = ( 'Jenkins.instance.updateCenter.getPlugin("%s")' '.getNeededDependencies().each{it.deploy()}; %s' % ( self.params['name'], install_script)) script_data = { 'script': install_script } script_data.update(self.crumb) data = urlencode(script_data) # Send the installation request r = self._get_url_data( "%s/scriptText" % self.url, msg_status="Cannot install plugin.", msg_exception="Plugin installation has failed.", data=data) hpi_file = '%s/plugins/%s.hpi' % ( self.params['jenkins_home'], self.params['name']) if os.path.isfile(hpi_file): os.remove(hpi_file) changed = True else: # Check if the plugin directory exists if not os.path.isdir(self.params['jenkins_home']): self.module.fail_json( msg="Jenkins home directory doesn't exist.") md5sum_old = None if os.path.isfile(plugin_file): # Make the checksum of the currently installed plugin with open(plugin_file, 'rb') as md5_plugin_fh: md5_plugin_content = md5_plugin_fh.read() md5sum_old = hashlib.md5(md5_plugin_content).hexdigest() if self.params['version'] in [None, 'latest']: # Take latest version plugin_url = ( "%s/latest/%s.hpi" % ( self.params['updates_url'], self.params['name'])) else: # Take specific version plugin_url = ( "{0}/download/plugins/" "{1}/{2}/{1}.hpi".format( self.params['updates_url'], self.params['name'], self.params['version'])) if ( self.params['updates_expiration'] == 0 or self.params['version'] not in [None, 'latest'] or md5sum_old is 
None): # Download the plugin file directly r = self._download_plugin(plugin_url) # Write downloaded plugin into file if checksums don't match if md5sum_old is None: # No previously installed plugin if not self.module.check_mode: self._write_file(plugin_file, r) changed = True else: # Get data for the MD5 data = r.read() # Make new checksum md5sum_new = hashlib.md5(data).hexdigest() # If the checksum is different from the currently installed # plugin, store the new plugin if md5sum_old != md5sum_new: if not self.module.check_mode: self._write_file(plugin_file, data) changed = True elif self.params['version'] == 'latest': # Check for update from the updates JSON file plugin_data = self._download_updates() try: with open(plugin_file, 'rb') as sha1_plugin_fh: sha1_plugin_content = sha1_plugin_fh.read() sha1_old = hashlib.sha1(sha1_plugin_content) except Exception as e: self.module.fail_json( msg="Cannot calculate SHA1 of the old plugin.", details=to_native(e)) sha1sum_old = base64.b64encode(sha1_old.digest()) # If the latest version changed, download it if sha1sum_old != to_bytes(plugin_data['sha1']): if not self.module.check_mode: r = self._download_plugin(plugin_url) self._write_file(plugin_file, r) changed = True # Change file attributes if needed if os.path.isfile(plugin_file): params = { 'dest': plugin_file } params.update(self.params) file_args = self.module.load_file_common_arguments(params) if not self.module.check_mode: # Not sure how to run this in the check mode changed = self.module.set_fs_attributes_if_different( file_args, changed) else: # See the comment above changed = True return changed def _download_updates(self): updates_filename = 'jenkins-plugin-cache.json' updates_dir = os.path.expanduser('~/.ansible/tmp') updates_file = "%s/%s" % (updates_dir, updates_filename) download_updates = True # Check if we need to download new updates file if os.path.isfile(updates_file): # Get timestamp when the file was changed last time ts_file = 
os.stat(updates_file).st_mtime ts_now = time.time() if ts_now - ts_file < self.params['updates_expiration']: download_updates = False updates_file_orig = updates_file # Download the updates file if needed if download_updates: url = "%s/update-center.json" % self.params['updates_url'] # Get the data r = self._get_url_data( url, msg_status="Remote updates not found.", msg_exception="Updates download failed.") # Write the updates file update_fd, updates_file = tempfile.mkstemp() os.write(update_fd, r.read()) try: os.close(update_fd) except IOError as e: self.module.fail_json( msg="Cannot close the tmp updates file %s." % updates_file, details=to_native(e)) # Open the updates file try: f = open(updates_file, encoding='utf-8') except IOError as e: self.module.fail_json( msg="Cannot open temporal updates file.", details=to_native(e)) i = 0 for line in f: # Read only the second line if i == 1: try: data = json.loads(line) except Exception as e: self.module.fail_json( msg="Cannot load JSON data from the tmp updates file.", details=to_native(e)) break i += 1 # Move the updates file to the right place if we could read it if download_updates: # Make sure the destination directory exists if not os.path.isdir(updates_dir): try: os.makedirs(updates_dir, int('0700', 8)) except OSError as e: self.module.fail_json( msg="Cannot create temporal directory.", details=to_native(e)) self.module.atomic_move(updates_file, updates_file_orig) # Check if we have the plugin data available if 'plugins' not in data or self.params['name'] not in data['plugins']: self.module.fail_json( msg="Cannot find plugin data in the updates file.") return data['plugins'][self.params['name']] def _download_plugin(self, plugin_url): # Download the plugin r = self._get_url_data( plugin_url, msg_status="Plugin not found.", msg_exception="Plugin download failed.") return r def _write_file(self, f, data): # Store the plugin into a temp file and then move it tmp_f_fd, tmp_f = tempfile.mkstemp() if isinstance(data, 
(text_type, binary_type)): os.write(tmp_f_fd, data) else: os.write(tmp_f_fd, data.read()) try: os.close(tmp_f_fd) except IOError as e: self.module.fail_json( msg='Cannot close the temporal plugin file %s.' % tmp_f, details=to_native(e)) # Move the file onto the right place self.module.atomic_move(tmp_f, f) def uninstall(self): changed = False # Perform the action if self.is_installed: if not self.module.check_mode: self._pm_query('doUninstall', 'Uninstallation') changed = True return changed def pin(self): return self._pinning('pin') def unpin(self): return self._pinning('unpin') def _pinning(self, action): changed = False # Check if the plugin is pinned/unpinned if ( action == 'pin' and not self.is_pinned or action == 'unpin' and self.is_pinned): # Perform the action if not self.module.check_mode: self._pm_query(action, "%sning" % action.capitalize()) changed = True return changed def enable(self): return self._enabling('enable') def disable(self): return self._enabling('disable') def _enabling(self, action): changed = False # Check if the plugin is pinned/unpinned if ( action == 'enable' and not self.is_enabled or action == 'disable' and self.is_enabled): # Perform the action if not self.module.check_mode: self._pm_query( "make%sd" % action.capitalize(), "%sing" % action[:-1].capitalize()) changed = True return changed def _pm_query(self, action, msg): url = "%s/pluginManager/plugin/%s/%s" % ( self.params['url'], self.params['name'], action) data = urlencode(self.crumb) # Send the request self._get_url_data( url, msg_status="Plugin not found. %s" % url, msg_exception="%s has failed." 
% msg, data=data) def main(): # Module arguments argument_spec = url_argument_spec() argument_spec.update( group=dict(default='jenkins'), jenkins_home=dict(default='/var/lib/jenkins'), mode=dict(default='0644', type='raw'), name=dict(required=True), owner=dict(default='jenkins'), params=dict(type='dict'), state=dict( choices=[ 'present', 'absent', 'pinned', 'unpinned', 'enabled', 'disabled', 'latest'], default='present'), timeout=dict(default=30, type="int"), updates_expiration=dict(default=86400, type="int"), updates_url=dict(default='https://updates.jenkins.io'), url=dict(default='http://localhost:8080'), url_password=dict(no_log=True), version=dict(), with_dependencies=dict(default=True, type='bool'), ) # Module settings module = AnsibleModule( argument_spec=argument_spec, add_file_common_args=True, supports_check_mode=True, ) # Params was removed # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html if module.params['params']: module.fail_json(msg="The params option to jenkins_plugin was removed in Ansible 2.5 since it circumvents Ansible's option handling") # Force basic authentication module.params['force_basic_auth'] = True # Convert timeout to float try: module.params['timeout'] = float(module.params['timeout']) except ValueError as e: module.fail_json( msg='Cannot convert %s to float.' 
% module.params['timeout'], details=to_native(e)) # Set version to latest if state is latest if module.params['state'] == 'latest': module.params['state'] = 'present' module.params['version'] = 'latest' # Create some shortcuts name = module.params['name'] state = module.params['state'] # Initial change state of the task changed = False # Instantiate the JenkinsPlugin object jp = JenkinsPlugin(module) # Perform action depending on the requested state if state == 'present': changed = jp.install() elif state == 'absent': changed = jp.uninstall() elif state == 'pinned': changed = jp.pin() elif state == 'unpinned': changed = jp.unpin() elif state == 'enabled': changed = jp.enable() elif state == 'disabled': changed = jp.disable() # Print status of the change module.exit_json(changed=changed, plugin=name, state=state) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
61,672
jenkins_plugin: use of authorization crumb must be conducted in a session since LTS 2.176
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY Plugins cannot be installed using the jenkins_plugin module since upgrading to LTS 2.176. This is due to additional CSRF security where the crumb that is issued may only be used within the same session. In the jenkins log messages like this are reported: ``` WARNING: Found invalid crumb 5cae269d58b83057dfbd85f6c32f0845. Will check remaining parameters for a valid one... ``` The module will then error with `Cannot install plugin.` ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME jenkins_plugin ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.8.4 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/james/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/dist-packages/ansible executable location = /usr/bin/ansible python version = 2.7.16 (default, Apr 6 2019, 01:42:57) [GCC 8.3.0] ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below (no output) ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Install jenkins 2.176, attempt to install plugins using ansible, e.g. 
a task such as: ``` - name: "Install Jenkins plugins" jenkins_plugin: owner: jenkins group: jenkins name: "{{ item }}" url: "https://{{ ansible_host }}:8090/" url_username: "{{ ansible_user }}" url_password: "{{ ansible_become_pass }}" force_basic_auth: yes with_items: - ansicolor ``` <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> The named plugin is installed in jenkins. ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/61672
https://github.com/ansible/ansible/pull/61673
dfc023209fc7460c449b2e97feb1b5e6d79d1491
76b5b90bd6fdf4ac3fb785f81c5fab18445db2e2
2019-09-02T09:46:51Z
python
2019-09-10T13:44:37Z
lib/ansible/modules/web_infrastructure/jenkins_script.py
#!/usr/bin/python # encoding: utf-8 # (c) 2016, James Hogarth <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- author: James Hogarth (@hogarthj) module: jenkins_script short_description: Executes a groovy script in the jenkins instance version_added: '2.3' description: - The C(jenkins_script) module takes a script plus a dict of values to use within the script and returns the result of the script being run. options: script: description: - The groovy script to be executed. This gets passed as a string Template if args is defined. required: true url: description: - The jenkins server to execute the script against. The default is a local jenkins instance that is not being proxied through a webserver. default: http://localhost:8080 validate_certs: description: - If set to C(no), the SSL certificates will not be validated. This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. type: bool default: 'yes' user: description: - The username to connect to the jenkins server with. password: description: - The password to connect to the jenkins server with. timeout: description: - The request timeout in seconds default: 10 version_added: "2.4" args: description: - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings). notes: - Since the script can do anything this does not report on changes. Knowing the script is being run it's important to set changed_when for the ansible output to be clear on any alterations made. 
''' EXAMPLES = ''' - name: Obtaining a list of plugins jenkins_script: script: 'println(Jenkins.instance.pluginManager.plugins)' user: admin password: admin - name: Setting master using a variable to hold a more complicate script set_fact: setmaster_mode: | import jenkins.model.* instance = Jenkins.getInstance() instance.setMode(${jenkins_mode}) instance.save() - name: use the variable as the script jenkins_script: script: "{{ setmaster_mode }}" args: jenkins_mode: Node.Mode.EXCLUSIVE - name: interacting with an untrusted HTTPS connection jenkins_script: script: "println(Jenkins.instance.pluginManager.plugins)" user: admin password: admin url: https://localhost validate_certs: no ''' RETURN = ''' output: description: Result of script returned: success type: str sample: 'Result: true' ''' import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url from ansible.module_utils._text import to_native def is_csrf_protection_enabled(module): resp, info = fetch_url(module, module.params['url'] + '/api/json', method='GET') if info["status"] != 200: module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='') content = to_native(resp.read()) return json.loads(content).get('useCrumbs', False) def get_crumb(module): resp, info = fetch_url(module, module.params['url'] + '/crumbIssuer/api/json', method='GET') if info["status"] != 200: module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='') content = to_native(resp.read()) return json.loads(content) def main(): module = AnsibleModule( argument_spec=dict( script=dict(required=True, type="str"), url=dict(required=False, type="str", default="http://localhost:8080"), validate_certs=dict(required=False, type="bool", default=True), user=dict(required=False, type="str", default=None), password=dict(required=False, no_log=True, type="str", default=None), 
timeout=dict(required=False, type="int", default=10), args=dict(required=False, type="dict", default=None) ) ) if module.params['user'] is not None: if module.params['password'] is None: module.fail_json(msg="password required when user provided", output='') module.params['url_username'] = module.params['user'] module.params['url_password'] = module.params['password'] module.params['force_basic_auth'] = True if module.params['args'] is not None: from string import Template try: script_contents = Template(module.params['script']).substitute(module.params['args']) except KeyError as err: module.fail_json(msg="Error with templating variable: %s" % err, output='') else: script_contents = module.params['script'] headers = {} if is_csrf_protection_enabled(module): crumb = get_crumb(module) headers = {crumb['crumbRequestField']: crumb['crumb']} resp, info = fetch_url(module, module.params['url'] + "/scriptText", data=urlencode({'script': script_contents}), headers=headers, method="POST", timeout=module.params['timeout']) if info["status"] != 200: module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='') result = to_native(resp.read()) if 'Exception:' in result and 'at java.lang.Thread' in result: module.fail_json(msg="script failed with stacktrace:\n " + result, output='') module.exit_json( output=result, ) if __name__ == '__main__': main()
closed
ansible/ansible
https://github.com/ansible/ansible
62,075
ansible-test sanity on collections should ignore tests/output directory
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> ansible-test sanity also checks the automatically created tests/output directory and is failing the symlink test. This happens when running ansible-sanity on collection ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below Stable 2.9 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Running ansible-test sanity on a collection with python 3.7 <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Ignore the tests/output directory ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below Running sanity test 'symlinks' with Python 3.7 ERROR: Found 6 symlinks issue(s) which need to be resolved: ERROR: tests/output/.tmp/sanity/import/lib/ansible/module_utils:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/lib/ansible/module_utils ERROR: tests/output/.tmp/sanity/import/lib/ansible/utils/collection_loader.py:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../../anshul_ansible/ansible/lib/ansible/utils/collection_loader.py ERROR: tests/output/.tmp/sanity/import/lib/ansible/utils/singleton.py:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../../anshul_ansible/ansible/lib/ansible/utils/singleton.py ERROR: tests/output/.tmp/sanity/import/minimal-py37/bin/importer.py:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/test/lib/ansible_test/_data/sanity/import/importer.py ERROR: tests/output/.tmp/sanity/import/minimal-py37/bin/python:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/venv3/bin/python3 ERROR: tests/output/.tmp/sanity/import/minimal-py37/bin/python3:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/venv3/bin/python3 ```
https://github.com/ansible/ansible/issues/62075
https://github.com/ansible/ansible/pull/62084
0f52b18f3f42626ac1499a49d4a58c78d1a1ab32
f110abb8061fe6a629e12c782cef32ae2d3152e9
2019-09-10T15:32:16Z
python
2019-09-10T22:49:40Z
changelogs/fragments/ansible-test-ignore-tests-output.yml
closed
ansible/ansible
https://github.com/ansible/ansible
62,075
ansible-test sanity on collections should ignore tests/output directory
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> ansible-test sanity also checks the automatically created tests/output directory and is failing the symlink test. This happens when running ansible-sanity on collection ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ```paste below Stable 2.9 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> Running ansible-test sanity on a collection with python 3.7 <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> Ignore the tests/output directory ##### ACTUAL RESULTS <!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below Running sanity test 'symlinks' with Python 3.7 ERROR: Found 6 symlinks issue(s) which need to be resolved: ERROR: tests/output/.tmp/sanity/import/lib/ansible/module_utils:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/lib/ansible/module_utils ERROR: tests/output/.tmp/sanity/import/lib/ansible/utils/collection_loader.py:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../../anshul_ansible/ansible/lib/ansible/utils/collection_loader.py ERROR: tests/output/.tmp/sanity/import/lib/ansible/utils/singleton.py:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../../anshul_ansible/ansible/lib/ansible/utils/singleton.py ERROR: tests/output/.tmp/sanity/import/minimal-py37/bin/importer.py:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/test/lib/ansible_test/_data/sanity/import/importer.py ERROR: tests/output/.tmp/sanity/import/minimal-py37/bin/python:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/venv3/bin/python3 ERROR: tests/output/.tmp/sanity/import/minimal-py37/bin/python3:0:0: symlinks outside content tree are not allowed: ../../../../../../../../../../anshul_ansible/ansible/venv3/bin/python3 ```
https://github.com/ansible/ansible/issues/62075
https://github.com/ansible/ansible/pull/62084
0f52b18f3f42626ac1499a49d4a58c78d1a1ab32
f110abb8061fe6a629e12c782cef32ae2d3152e9
2019-09-10T15:32:16Z
python
2019-09-10T22:49:40Z
test/lib/ansible_test/_internal/provider/source/unversioned.py
"""Fallback source provider when no other provider matches the content root.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ... import types as t from ...constants import ( TIMEOUT_PATH, ) from ...util import ( to_bytes, ) from . import ( SourceProvider, ) class UnversionedSource(SourceProvider): """Fallback source provider when no other provider matches the content root.""" sequence = 0 # disable automatic detection @staticmethod def is_content_root(path): # type: (str) -> bool """Return True if the given path is a content root for this provider.""" return False def get_paths(self, path): # type: (str) -> t.List[str] """Return the list of available content paths under the given path.""" paths = [] kill_any_dir = ( '.idea', '.pytest_cache', '__pycache__', 'ansible.egg-info', ) kill_sub_dir = { 'test/runner': ( '.tox', ), 'test': ( 'results', 'cache', 'output', ), 'docs/docsite': ( '_build', ), } kill_sub_file = { '': ( TIMEOUT_PATH, ), } kill_extensions = ( '.pyc', '.pyo', '.retry', ) for root, dir_names, file_names in os.walk(path): rel_root = os.path.relpath(root, path) if rel_root == '.': rel_root = '' for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()): if kill in dir_names: dir_names.remove(kill) kill_files = kill_sub_file.get(rel_root, ()) paths.extend([os.path.join(rel_root, file_name) for file_name in file_names if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files]) # include directory symlinks since they will not be traversed and would otherwise go undetected paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(dir_name))]) return paths
closed
ansible/ansible
https://github.com/ansible/ansible
62,105
ansible-test is removing the pip and setuptools
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> When I initiate a virtualenv with python 3.7 and then installing https://pypi.org/project/ansible/2.9.0b1/ inside it. Running sanity tests is causing the pip package to be removed from the venv ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> /bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Same also happens with the latest stable-2.9 branch too ```paste below ansible==2.9.0b1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  which pip /tmp/an/.venv/bin/pip (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  ansible-test sanity --python 3.7 --requirements ... 
(.venv) ✘ abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  which pip /usr/local/bin/pip (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  pip freeze ansible==2.9.0b1 ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> pip should not be removed ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/62105
https://github.com/ansible/ansible/pull/62111
cd8161a94827bd8dbae53610318f5957b56b8b98
a7bc11ce67ee58f384c82b6dcae4feede19044a5
2019-09-10T21:29:20Z
python
2019-09-11T02:27:12Z
changelogs/fragments/ansible-test-virtualenv-venv.yml
closed
ansible/ansible
https://github.com/ansible/ansible
62,105
ansible-test is removing the pip and setuptools
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> When I initiate a virtualenv with python 3.7 and then installing https://pypi.org/project/ansible/2.9.0b1/ inside it. Running sanity tests is causing the pip package to be removed from the venv ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> /bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Same also happens with the latest stable-2.9 branch too ```paste below ansible==2.9.0b1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  which pip /tmp/an/.venv/bin/pip (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  ansible-test sanity --python 3.7 --requirements ... 
(.venv) ✘ abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  which pip /usr/local/bin/pip (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  pip freeze ansible==2.9.0b1 ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> pip should not be removed ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/62105
https://github.com/ansible/ansible/pull/62111
cd8161a94827bd8dbae53610318f5957b56b8b98
a7bc11ce67ee58f384c82b6dcae4feede19044a5
2019-09-10T21:29:20Z
python
2019-09-11T02:27:12Z
test/lib/ansible_test/_data/virtualenvcheck.py
closed
ansible/ansible
https://github.com/ansible/ansible
62,105
ansible-test is removing the pip and setuptools
<!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and devel branch are affected too --> <!--- Complete *all* sections as described, this form is processed automatically --> ##### SUMMARY <!--- Explain the problem briefly below --> When I initiate a virtualenv with python 3.7 and then installing https://pypi.org/project/ansible/2.9.0b1/ inside it. Running sanity tests is causing the pip package to be removed from the venv ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure --> /bin/ansible-test ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> Same also happens with the latest stable-2.9 branch too ```paste below ansible==2.9.0b1 ``` ##### CONFIGURATION <!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes --> ```paste below ``` ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> <!--- Paste example playbooks or commands between quotes below --> ```yaml (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  which pip /tmp/an/.venv/bin/pip (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  ansible-test sanity --python 3.7 --requirements ... 
(.venv) ✘ abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  which pip /usr/local/bin/pip (.venv) abehl@ansh  ~/.ansible/collections/ansible_collections/sensu/sensu_go  pip freeze ansible==2.9.0b1 ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- Describe what you expected to happen when running the steps above --> pip should not be removed ##### ACTUAL RESULTS <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes --> ```paste below ```
https://github.com/ansible/ansible/issues/62105
https://github.com/ansible/ansible/pull/62111
cd8161a94827bd8dbae53610318f5957b56b8b98
a7bc11ce67ee58f384c82b6dcae4feede19044a5
2019-09-10T21:29:20Z
python
2019-09-11T02:27:12Z
test/lib/ansible_test/_internal/venv.py
"""Virtual environment management.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from . import types as t from .config import ( EnvironmentConfig, ) from .util import ( find_python, SubprocessError, get_available_python_versions, SUPPORTED_PYTHON_VERSIONS, display, remove_tree, ) from .util_common import ( run_command, ) def create_virtual_environment(args, # type: EnvironmentConfig version, # type: str path, # type: str system_site_packages=False, # type: bool pip=True, # type: bool ): # type: (...) -> bool """Create a virtual environment using venv or virtualenv for the requested Python version.""" if os.path.isdir(path): display.info('Using existing Python %s virtual environment: %s' % (version, path), verbosity=1) return True python = find_python(version, required=False) python_version = tuple(int(v) for v in version.split('.')) if not python: # the requested python version could not be found return False if python_version >= (3, 0): # use the built-in 'venv' module on Python 3.x if run_venv(args, python, system_site_packages, pip, path): display.info('Created Python %s virtual environment using "venv": %s' % (version, path), verbosity=1) return True # something went wrong, most likely the package maintainer for the Python installation removed ensurepip # which will prevent creation of a virtual environment without installation of other OS packages # use the installed 'virtualenv' module on the Python requested version if run_virtualenv(args, python, python, system_site_packages, pip, path): display.info('Created Python %s virtual environment using "virtualenv": %s' % (version, path), verbosity=1) return True available_pythons = get_available_python_versions(SUPPORTED_PYTHON_VERSIONS) for available_python_version, available_python_interpreter in sorted(available_pythons.items()): virtualenv_version = get_virtualenv_version(args, available_python_interpreter) if not virtualenv_version: # virtualenv not 
available for this Python or we were unable to detect the version continue if python_version == (2, 6) and virtualenv_version >= (16, 0, 0): # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16 continue # try using 'virtualenv' from another Python to setup the desired version if run_virtualenv(args, available_python_interpreter, python, system_site_packages, pip, path): display.info('Created Python %s virtual environment using "virtualenv" on Python %s: %s' % (version, available_python_version, path), verbosity=1) return True # no suitable 'virtualenv' available return False def run_venv(args, # type: EnvironmentConfig run_python, # type: str system_site_packages, # type: bool pip, # type: bool path, # type: str ): # type: (...) -> bool """Create a virtual environment using the 'venv' module. Not available on Python 2.x.""" cmd = [run_python, '-m', 'venv'] if system_site_packages: cmd.append('--system-site-packages') if not pip: cmd.append('--without-pip') cmd.append(path) try: run_command(args, cmd, capture=True) except SubprocessError as ex: remove_tree(path) if args.verbosity > 1: display.error(ex) return False return True def run_virtualenv(args, # type: EnvironmentConfig run_python, # type: str env_python, # type: str system_site_packages, # type: bool pip, # type: bool path, # type: str ): # type: (...) 
-> bool """Create a virtual environment using the 'virtualenv' module.""" cmd = [run_python, '-m', 'virtualenv'] if run_python != env_python: cmd += ['--python', env_python] if system_site_packages: cmd.append('--system-site-packages') if not pip: cmd.append('--no-pip') cmd.append(path) try: run_command(args, cmd, capture=True) except SubprocessError as ex: remove_tree(path) if args.verbosity > 1: display.error(ex) return False return True def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]] """Get the virtualenv version for the given python intepreter, if available.""" try: return get_virtualenv_version.result except AttributeError: pass get_virtualenv_version.result = None cmd = [python, '-m', 'virtualenv', '--version'] try: stdout = run_command(args, cmd, capture=True)[0] except SubprocessError as ex: if args.verbosity > 1: display.error(ex) stdout = '' if stdout: # noinspection PyBroadException try: get_virtualenv_version.result = tuple(int(v) for v in stdout.strip().split('.')) except Exception: # pylint: disable=broad-except pass return get_virtualenv_version.result