Dataset schema:

| column | dtype | notes |
|---|---|---|
| status | string | 1 class |
| repo_name | string | 31 values |
| repo_url | string | 31 values |
| issue_id | int64 | 1 to 104k |
| title | string | 4 to 369 chars |
| body | string | 0 to 254k chars, nullable (⌀) |
| issue_url | string | 37 to 56 chars |
| pull_url | string | 37 to 54 chars |
| before_fix_sha | string | 40 chars |
| after_fix_sha | string | 40 chars |
| report_datetime | timestamp[us, tz=UTC] | |
| language | string | 5 classes |
| commit_datetime | timestamp[us, tz=UTC] | |
| updated_file | string | 4 to 188 chars |
| file_content | string | 0 to 5.12M chars |
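Each record below follows this schema, with one row per updated file. As a minimal sketch, rows of this shape could be read with the Hugging Face `datasets` library; the dataset id `org/bug-fix-dataset` is a hypothetical placeholder, since this dump does not name the dataset:

```python
# Minimal sketch, assuming the Hugging Face `datasets` library is installed.
# "org/bug-fix-dataset" is a hypothetical placeholder id; the real dataset
# id is not given in this dump.
from datasets import load_dataset

ds = load_dataset("org/bug-fix-dataset", split="train")
row = ds[0]
# Column names follow the schema table above.
print(row["repo_name"], row["issue_id"], row["title"])
print(row["updated_file"], len(row["file_content"]))
```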
---
status: closed
repo_name: ansible/ansible
repo_url: https://github.com/ansible/ansible
issue_id: 74049
title: Investigate fragile strategy unit tests
body:
### Summary
The unit tests in `test/units/plugins/strategy/test_strategy.py` have become problematic and fail randomly because something in the tests does not properly tear down Ansible. Eventually the tests are killed in CI, most frequently on Python 3.5.
See https://github.com/ansible/ansible/pull/74048
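For context, the pattern the file below already uses to guard real `TaskQueueManager` instances is a `try`/`finally` around the test body; a minimal illustrative sketch of that pattern (not the fix from the linked PR):

```python
# Illustrative sketch of the cleanup pattern used in the test file below.
# Without the finally block, leaked worker processes can hang the CI run.
from ansible.executor.task_queue_manager import TaskQueueManager

def exercise_strategy(inventory, variable_manager, loader):
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=None,
        forks=3,
    )
    tqm._initialize_processes(3)
    try:
        pass  # ... run the strategy under test and make assertions ...
    finally:
        tqm.cleanup()  # always reap workers, even if an assertion fails
```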
### Issue Type
Bug Report
### Component Name
test/units/plugins/strategy/test_strategy.py
### Ansible Version
```console (paste below)
$ ansible --version
2.11
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
default test container, frequently with Py3.5 in CI
### Steps to Reproduce
```yaml (paste below)
```
### Expected Results
No hang in CI
### Actual Results
Transient hangs in CI
issue_url: https://github.com/ansible/ansible/issues/74049
pull_url: https://github.com/ansible/ansible/pull/78293
before_fix_sha: 5e746932152743bf36d106bb1e3a5cbb334b1ec4
after_fix_sha: c6c9d90ca4cb21e796a5f1bda441250e04f97189
report_datetime: 2021-03-26T18:10:05Z
language: python
commit_datetime: 2022-07-19T15:18:22Z
updated_file: test/units/plugins/strategy/test_strategy.py
file_content:
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.mock.loader import DictDataLoader
import uuid
from units.compat import unittest
from unittest.mock import patch, MagicMock
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.plugins.strategy import StrategyBase
import pytest
pytestmark = pytest.mark.skipif(True, reason="Temporarily disabled due to fragile tests that need to be rewritten")
class TestStrategyBase(unittest.TestCase):
def test_strategy_base_init(self):
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
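        # Editor's note (not in the original source): the three side_effect
        # hooks above make this MagicMock behave like the TQM's final results
        # queue, backed by the local queue_items list.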
mock_tqm = MagicMock(TaskQueueManager)
mock_tqm._final_q = mock_queue
mock_tqm._workers = []
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base.cleanup()
def test_strategy_base_run(self):
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm = MagicMock(TaskQueueManager)
mock_tqm._final_q = mock_queue
mock_tqm._stats = MagicMock()
mock_tqm.send_callback.return_value = None
for attr in ('RUN_OK', 'RUN_ERROR', 'RUN_FAILED_HOSTS', 'RUN_UNREACHABLE_HOSTS'):
setattr(mock_tqm, attr, getattr(TaskQueueManager, attr))
mock_iterator = MagicMock()
mock_iterator._play = MagicMock()
mock_iterator._play.handlers = []
mock_play_context = MagicMock()
mock_tqm._failed_hosts = dict()
mock_tqm._unreachable_hosts = dict()
mock_tqm._workers = []
strategy_base = StrategyBase(tqm=mock_tqm)
mock_host = MagicMock()
mock_host.name = 'host1'
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), mock_tqm.RUN_OK)
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=TaskQueueManager.RUN_ERROR), mock_tqm.RUN_ERROR)
mock_tqm._failed_hosts = dict(host1=True)
mock_iterator.get_failed_hosts.return_value = [mock_host]
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_FAILED_HOSTS)
mock_tqm._unreachable_hosts = dict(host1=True)
mock_iterator.get_failed_hosts.return_value = []
self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_UNREACHABLE_HOSTS)
strategy_base.cleanup()
def test_strategy_base_get_hosts(self):
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_hosts = []
for i in range(0, 5):
mock_host = MagicMock()
mock_host.name = "host%02d" % (i + 1)
mock_host.has_hostkey = True
mock_hosts.append(mock_host)
mock_hosts_names = [h.name for h in mock_hosts]
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = mock_hosts
mock_tqm = MagicMock()
mock_tqm._final_q = mock_queue
mock_tqm.get_inventory.return_value = mock_inventory
mock_play = MagicMock()
mock_play.hosts = ["host%02d" % (i + 1) for i in range(0, 5)]
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._hosts_cache = strategy_base._hosts_cache_all = mock_hosts_names
mock_tqm._failed_hosts = []
mock_tqm._unreachable_hosts = []
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts])
mock_tqm._failed_hosts = ["host01"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts[1:]])
self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0].name])
mock_tqm._unreachable_hosts = ["host02"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts[2:]])
strategy_base.cleanup()
@patch.object(WorkerProcess, 'run')
def test_strategy_base_queue_task(self, mock_worker):
def fake_run(self):
return
mock_worker.run.side_effect = fake_run
fake_loader = DictDataLoader()
mock_var_manager = MagicMock()
mock_host = MagicMock()
mock_host.get_vars.return_value = dict()
mock_host.has_hostkey = True
mock_inventory = MagicMock()
mock_inventory.get.return_value = mock_host
tqm = TaskQueueManager(
inventory=mock_inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
passwords=None,
forks=3,
)
tqm._initialize_processes(3)
tqm.hostvars = dict()
mock_task = MagicMock()
mock_task._uuid = 'abcd'
mock_task.throttle = 0
try:
strategy_base = StrategyBase(tqm=tqm)
strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock())
self.assertEqual(strategy_base._cur_worker, 1)
self.assertEqual(strategy_base._pending_results, 1)
strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock())
self.assertEqual(strategy_base._cur_worker, 2)
self.assertEqual(strategy_base._pending_results, 2)
strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock())
self.assertEqual(strategy_base._cur_worker, 0)
self.assertEqual(strategy_base._pending_results, 3)
finally:
tqm.cleanup()
def test_strategy_base_process_pending_results(self):
mock_tqm = MagicMock()
mock_tqm._terminated = False
mock_tqm._failed_hosts = dict()
mock_tqm._unreachable_hosts = dict()
mock_tqm.send_callback.return_value = None
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm._final_q = mock_queue
mock_tqm._stats = MagicMock()
mock_tqm._stats.increment.return_value = None
mock_play = MagicMock()
mock_host = MagicMock()
mock_host.name = 'test01'
mock_host.vars = dict()
mock_host.get_vars.return_value = dict()
mock_host.has_hostkey = True
mock_task = MagicMock()
mock_task._role = None
mock_task._parent = None
mock_task.ignore_errors = False
mock_task.ignore_unreachable = False
mock_task._uuid = str(uuid.uuid4())
mock_task.loop = None
mock_task.copy.return_value = mock_task
mock_handler_task = Handler()
mock_handler_task.name = 'test handler'
mock_handler_task.action = 'foo'
mock_handler_task._parent = None
mock_handler_task._uuid = 'xxxxxxxxxxxxx'
mock_iterator = MagicMock()
mock_iterator._play = mock_play
mock_iterator.mark_host_failed.return_value = None
mock_iterator.get_next_task_for_host.return_value = (None, None)
mock_handler_block = MagicMock()
mock_handler_block.block = [mock_handler_task]
mock_handler_block.rescue = []
mock_handler_block.always = []
mock_play.handlers = [mock_handler_block]
mock_group = MagicMock()
mock_group.add_host.return_value = None
def _get_host(host_name):
if host_name == 'test01':
return mock_host
return None
def _get_group(group_name):
if group_name in ('all', 'foo'):
return mock_group
return None
mock_inventory = MagicMock()
mock_inventory._hosts_cache = dict()
mock_inventory.hosts.return_value = mock_host
mock_inventory.get_host.side_effect = _get_host
mock_inventory.get_group.side_effect = _get_group
mock_inventory.clear_pattern_cache.return_value = None
mock_inventory.get_host_vars.return_value = {}
mock_inventory.hosts.get.return_value = mock_host
mock_var_mgr = MagicMock()
mock_var_mgr.set_host_variable.return_value = None
mock_var_mgr.set_host_facts.return_value = None
mock_var_mgr.get_vars.return_value = dict()
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._inventory = mock_inventory
strategy_base._variable_manager = mock_var_mgr
strategy_base._blocked_hosts = dict()
def _has_dead_workers():
return False
strategy_base._tqm.has_dead_workers.side_effect = _has_dead_workers
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 0)
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True))
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
def mock_queued_task_cache():
return {
(mock_host.name, mock_task._uuid): {
'task': mock_task,
'host': mock_host,
'task_vars': {},
'play_context': {},
}
}
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"failed":true}')
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
mock_iterator.is_failed.return_value = True
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
# self.assertIn('test01', mock_tqm._failed_hosts)
# del mock_tqm._failed_hosts['test01']
mock_iterator.is_failed.return_value = False
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"unreachable": true}')
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
self.assertIn('test01', mock_tqm._unreachable_hosts)
del mock_tqm._unreachable_hosts['test01']
task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"skipped": true}')
queue_items.append(task_result)
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], task_result)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo'))))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler'])))
strategy_base._blocked_hosts['test01'] = True
strategy_base._pending_results = 1
strategy_base._queued_task_cache = mock_queued_task_cache()
results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
self.assertEqual(len(results), 1)
self.assertEqual(strategy_base._pending_results, 0)
self.assertNotIn('test01', strategy_base._blocked_hosts)
self.assertTrue(mock_handler_task.is_host_notified(mock_host))
# queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
# results = strategy_base._process_pending_results(iterator=mock_iterator)
# self.assertEqual(len(results), 0)
# self.assertEqual(strategy_base._pending_results, 1)
# queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
# results = strategy_base._process_pending_results(iterator=mock_iterator)
# self.assertEqual(len(results), 0)
# self.assertEqual(strategy_base._pending_results, 1)
# queue_items.append(('bad'))
# self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
strategy_base.cleanup()
def test_strategy_base_load_included_file(self):
fake_loader = DictDataLoader({
"test.yml": """
- debug: msg='foo'
""",
"bad.yml": """
""",
})
queue_items = []
def _queue_empty(*args, **kwargs):
return len(queue_items) == 0
def _queue_get(*args, **kwargs):
if len(queue_items) == 0:
raise Queue.Empty
else:
return queue_items.pop()
def _queue_put(item, *args, **kwargs):
queue_items.append(item)
mock_queue = MagicMock()
mock_queue.empty.side_effect = _queue_empty
mock_queue.get.side_effect = _queue_get
mock_queue.put.side_effect = _queue_put
mock_tqm = MagicMock()
mock_tqm._final_q = mock_queue
strategy_base = StrategyBase(tqm=mock_tqm)
strategy_base._loader = fake_loader
strategy_base.cleanup()
mock_play = MagicMock()
mock_block = MagicMock()
mock_block._play = mock_play
mock_block.vars = dict()
mock_task = MagicMock()
mock_task._block = mock_block
mock_task._role = None
# NOTE Mocking calls below to account for passing parent_block=ti_copy.build_parent_block()
# into load_list_of_blocks() in _load_included_file. Not doing so meant that retrieving
# `collection` attr from parent would result in getting MagicMock instance
# instead of an empty list.
mock_task._parent = MagicMock()
mock_task.copy.return_value = mock_task
mock_task.build_parent_block.return_value = mock_block
mock_block._get_parent_attribute.return_value = None
mock_iterator = MagicMock()
mock_iterator.mark_host_failed.return_value = None
mock_inc_file = MagicMock()
mock_inc_file._task = mock_task
mock_inc_file._filename = "test.yml"
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
self.assertEqual(len(res), 1)
self.assertTrue(isinstance(res[0], Block))
mock_inc_file._filename = "bad.yml"
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
self.assertEqual(res, [])
@patch.object(WorkerProcess, 'run')
def test_strategy_base_run_handlers(self, mock_worker):
def fake_run(*args):
return
mock_worker.side_effect = fake_run
mock_play_context = MagicMock()
mock_handler_task = Handler()
mock_handler_task.action = 'foo'
mock_handler_task.cached_name = False
mock_handler_task.name = "test handler"
mock_handler_task.listen = []
mock_handler_task._role = None
mock_handler_task._parent = None
mock_handler_task._uuid = 'xxxxxxxxxxxxxxxx'
mock_handler = MagicMock()
mock_handler.block = [mock_handler_task]
mock_handler.flag_for_host.return_value = False
mock_play = MagicMock()
mock_play.handlers = [mock_handler]
mock_host = MagicMock(Host)
mock_host.name = "test01"
mock_host.has_hostkey = True
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = [mock_host]
mock_inventory.get.return_value = mock_host
mock_inventory.get_host.return_value = mock_host
mock_var_mgr = MagicMock()
mock_var_mgr.get_vars.return_value = dict()
mock_iterator = MagicMock()
mock_iterator._play = mock_play
fake_loader = DictDataLoader()
tqm = TaskQueueManager(
inventory=mock_inventory,
variable_manager=mock_var_mgr,
loader=fake_loader,
passwords=None,
forks=5,
)
tqm._initialize_processes(3)
tqm.hostvars = dict()
try:
strategy_base = StrategyBase(tqm=tqm)
strategy_base._inventory = mock_inventory
task_result = TaskResult(mock_host.name, mock_handler_task._uuid, dict(changed=False))
strategy_base._queued_task_cache = dict()
strategy_base._queued_task_cache[(mock_host.name, mock_handler_task._uuid)] = {
'task': mock_handler_task,
'host': mock_host,
'task_vars': {},
'play_context': mock_play_context
}
tqm._final_q.put(task_result)
result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
finally:
strategy_base.cleanup()
tqm.cleanup()
---
status: closed
repo_name: ansible/ansible
repo_url: https://github.com/ansible/ansible
issue_id: 78295
title: DNF - Comparison operators for package version not working
body:
### Summary
It is not possible to use versions for installing packages like it is documented:
> - Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
I tried this:
```yaml
- name: "Install packages"
become: true
ansible.builtin.dnf:
allow_downgrade: True
update_cache: True
state: present
name:
- mypackage>=0.1.2-3.el8
- mypackage>=0.1.2-3
- mypackage>=0.1.2
- C(mypackage>=0.1.2-3.el8)
```
The only thing that works is `mypackage-0.1.2-3.el8`.
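For reference, the module hands the name spec straight to dnf (see `_mark_package_install()` in `lib/ansible/modules/dnf.py` below), so only spec forms dnf itself understands will resolve. A minimal sketch of checking a versioned spec against the dnf Python API; it assumes the `python3-dnf` bindings, keeps the reporter's placeholder `mypackage`, and uses spaces around the operator per the dnf CLI convention:

```python
# Minimal sketch, assuming the python3-dnf bindings are installed.
# 'mypackage' is a placeholder; spaces around '>=' follow the dnf CLI
# convention for versioned specs.
import dnf
import dnf.exceptions

base = dnf.Base()
base.read_all_repos()
base.fill_sack()
try:
    base.install('mypackage >= 0.1.2')  # same call path the ansible module uses
    base.resolve()
    print([str(pkg) for pkg in base.transaction.install_set])
except dnf.exceptions.Error as err:
    print('Could not resolve spec:', err)
```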
### Issue Type
Bug Report
### Component Name
dnf
### Ansible Version
```console
$ ansible --version
ansible 2.9.27
config file = /home/ngoeddel/git/ansible-v2/ansible.cfg
configured module search path = ['/home/ngoeddel/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.6/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.6.8 (default, Apr 12 2022, 06:55:39) [GCC 8.5.0 20210514 (Red Hat 8.5.0-10)]
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
COLLECTIONS_PATHS(/home/ngoeddel/git/ansible-v2/ansible.cfg) = ['/home/ngoeddel/git/ansible-v2/collections']
COMMAND_WARNINGS(/home/ngoeddel/git/ansible-v2/ansible.cfg) = False
DEFAULT_FILTER_PLUGIN_PATH(/home/ngoeddel/git/ansible-v2/ansible.cfg) = ['/home/ngoeddel/git/ansible-v2/filter_plugins']
DEFAULT_HOST_LIST(/home/ngoeddel/git/ansible-v2/ansible.cfg) = ['/home/ngoeddel/git/ansible-v2/environment/openi-epu6/dev']
DEFAULT_ROLES_PATH(/home/ngoeddel/git/ansible-v2/ansible.cfg) = ['/home/ngoeddel/git/ansible-v2/roles']
DEFAULT_STDOUT_CALLBACK(/home/ngoeddel/git/ansible-v2/ansible.cfg) = debug
DEFAULT_STRATEGY(/home/ngoeddel/git/ansible-v2/ansible.cfg) = linear
DEFAULT_TIMEOUT(/home/ngoeddel/git/ansible-v2/ansible.cfg) = 30
DEFAULT_VAULT_PASSWORD_FILE(/home/ngoeddel/git/ansible-v2/ansible.cfg) = /home/ngoeddel/.my.ansible.cnf
DISPLAY_SKIPPED_HOSTS(/home/ngoeddel/git/ansible-v2/ansible.cfg) = False
INTERPRETER_PYTHON(/home/ngoeddel/git/ansible-v2/ansible.cfg) = /usr/bin/python3
RETRY_FILES_ENABLED(/home/ngoeddel/git/ansible-v2/ansible.cfg) = False
```
### OS / Environment
Rocky Linux release 8.6 (Green Obsidian)
### Steps to Reproduce
```yaml (paste below)
- name: "Install packages"
become: true
ansible.builtin.dnf:
allow_downgrade: True
update_cache: True
state: present
name:
- mypackage>=0.1.2-3.el8
- mypackage>=0.1.2-3
- mypackage>=0.1.2
- C(mypackage>=0.1.2-3.el8)
```
### Expected Results
I expect it to install the package `mypackage` in at least version 0.1.2-3.el8 or higher if not already installed.
### Actual Results
```console
fatal: [somehost]: FAILED! => {
"changed": false,
"failures": [
"No package C(mypackage>=0.1.2-4.el8) available."
],
"rc": 1,
"results": []
}
MSG:
Failed to install some of the specified packages
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
issue_url: https://github.com/ansible/ansible/issues/78295
pull_url: https://github.com/ansible/ansible/pull/78316
before_fix_sha: 7aada8d499dd44bfdc351fb6ab9f5531ae24907b
after_fix_sha: b0a84cc9ca13b3feb0202c677a0547f9ef415f62
report_datetime: 2022-07-19T10:26:30Z
language: python
commit_datetime: 2022-07-21T17:39:17Z
updated_file: changelogs/fragments/78295-dnf-fix-comparison-operators-docs.yml
file_content: (empty)
---
status: closed
repo_name: ansible/ansible
repo_url: https://github.com/ansible/ansible
issue_id: 78295
title: DNF - Comparison operators for package version not working
body: (verbatim duplicate of the issue 78295 body above)
issue_url: https://github.com/ansible/ansible/issues/78295
pull_url: https://github.com/ansible/ansible/pull/78316
before_fix_sha: 7aada8d499dd44bfdc351fb6ab9f5531ae24907b
after_fix_sha: b0a84cc9ca13b3feb0202c677a0547f9ef415f62
report_datetime: 2022-07-19T10:26:30Z
language: python
commit_datetime: 2022-07-21T17:39:17Z
updated_file: lib/ansible/modules/dnf.py
file_content:
# -*- coding: utf-8 -*-
# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <[email protected]>
# Copyright 2018 Adam Miller <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
options:
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
When using state=latest, this can be '*' which means run: dnf -y update.
You can also pass a url or a local path to a rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
- You can also pass an absolute path for a binary which is provided by the package to install.
See examples for more information.
required: true
aliases:
- pkg
type: list
elements: str
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
type: str
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
type: str
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
default: 'no'
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
version_added: "2.3"
default: "/"
type: str
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
version_added: "2.6"
type: str
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
type: bool
default: "no"
version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
type: list
elements: str
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
without raising an error. Equivalent to passing the --skip-broken option.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in dnf.conf.
- If set to C(repoid), disable excludes defined for given repo id.
version_added: "2.7"
type: str
validate_certs:
description:
- This only applies if using an https url as the source of the rpm, e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
sslverify:
description:
- Disables SSL validation of the repository server for this transaction.
- This should be set to C(no) if one of the configured repositories is using an untrusted or self-signed certificate.
type: bool
default: "yes"
version_added: "2.13"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a possibly already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.7"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: "yes"
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the dnf lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
type: bool
default: "yes"
version_added: "2.8"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if I(download_only) is specified.
type: str
version_added: "2.8"
allowerasing:
description:
- If C(yes) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
version_added: "2.10"
nobest:
description:
- Set best option to False, so that transactions are not limited to best candidates only.
required: false
type: bool
default: "no"
version_added: "2.11"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
type: bool
default: "no"
version_added: "2.12"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
details: In the case of dnf, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
support: partial
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
notes:
- When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
(https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
requirements:
- "python >= 2.6"
- python-dnf
- "for the autoremove option you need dnf >= 2.0.1"
author:
- Igor Gnatenko (@ignatenkobrain) <[email protected]>
- Cristian van Ee (@DJMuggs) <cristian at cvee.org>
- Berend De Schouwer (@berenddeschouwer)
- Adam Miller (@maxamillion) <[email protected]>
'''
EXAMPLES = '''
- name: Install the latest version of Apache
ansible.builtin.dnf:
name: httpd
state: latest
- name: Install Apache >= 2.4
ansible.builtin.dnf:
name: httpd>=2.4
state: present
- name: Install the latest version of Apache and MariaDB
ansible.builtin.dnf:
name:
- httpd
- mariadb-server
state: latest
- name: Remove the Apache package
ansible.builtin.dnf:
name: httpd
state: absent
- name: Install the latest version of Apache from the testing repo
ansible.builtin.dnf:
name: httpd
enablerepo: testing
state: present
- name: Upgrade all packages
ansible.builtin.dnf:
name: "*"
state: latest
- name: Install the nginx rpm from a remote repo
ansible.builtin.dnf:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: Install nginx rpm from a local file
ansible.builtin.dnf:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install Package based upon the file it provides
ansible.builtin.dnf:
name: /usr/bin/cowsay
state: present
- name: Install the 'Development tools' package group
ansible.builtin.dnf:
name: '@Development tools'
state: present
- name: Autoremove unneeded packages installed as dependencies
ansible.builtin.dnf:
autoremove: yes
- name: Uninstall httpd but keep its dependencies
ansible.builtin.dnf:
name: httpd
state: absent
autoremove: no
- name: Install a modularity appstream with defined stream and profile
ansible.builtin.dnf:
name: '@postgresql:9.6/client'
state: present
- name: Install a modularity appstream with defined stream
ansible.builtin.dnf:
name: '@postgresql:9.6'
state: present
- name: Install a modularity appstream with defined profile
ansible.builtin.dnf:
name: '@postgresql/client'
state: present
'''
import os
import re
import sys
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_file
from ansible.module_utils.six import PY2, text_type
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
# NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
# because we need AnsibleModule object to use get_best_parsable_locale()
# to set proper locale before importing dnf to be able to scrape
# the output in some cases (FIXME?).
dnf = None
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
self.lockfile = "/var/cache/dnf/*_lock.pid"
self.pkg_mgr_name = "dnf"
try:
self.with_modules = dnf.base.WITH_MODULES
except AttributeError:
self.with_modules = False
# DNF specific args that are not part of YumDnf
self.allowerasing = self.module.params['allowerasing']
self.nobest = self.module.params['nobest']
def is_lockfile_pid_valid(self):
# FIXME? it looks like DNF takes care of invalid lock files itself?
# https://github.com/ansible/ansible/issues/57189
return True
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to filter in an install scenario. Do that here.
"""
if (
to_text("no package matched") in to_text(error) or
to_text("No match for argument:") in to_text(error)
):
return "No package {0} available.".format(spec)
return error
def _sanitize_dnf_error_msg_remove(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to ignore in a removal scenario as known benign
failures. Do that here.
"""
if (
'no package matched' in to_native(error) or
'No match for argument:' in to_native(error)
):
return (False, "{0} is not installed".format(spec))
# Return value is tuple of:
# ("Is this actually a failure?", "Error Message")
return (True, error)
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
# envra format for alignment with the yum module
result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result)
# keep nevra key for backwards compat as it was previously
# defined with a value in envra format
result['nevra'] = result['envra']
if package.installtime == 0:
result['yumstate'] = 'available'
else:
result['yumstate'] = 'installed'
return result
def _split_package_arch(self, packagename):
# This list was auto generated on a Fedora 28 system with the following one-liner
# printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
redhat_rpm_arches = [
"aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
"alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
"armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
"geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
"mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
"noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
"ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
"riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
"sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
]
name, delimiter, arch = packagename.rpartition('.')
if name and arch and arch in redhat_rpm_arches:
return name, arch
return packagename, None
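    # Editor's illustrative note (not in the original source):
    #   _split_package_arch('httpd.x86_64') -> ('httpd', 'x86_64')
    #   _split_package_arch('httpd')        -> ('httpd', None)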
def _packagename_dict(self, packagename):
"""
Return a dictionary of information for a package name string or None
if the package name doesn't contain at least all NVR elements
"""
if packagename[-4:] == '.rpm':
packagename = packagename[:-4]
rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
try:
arch = None
nevr, arch = self._split_package_arch(packagename)
if arch:
packagename = nevr
rpm_nevr_match = rpm_nevr_re.match(packagename)
if rpm_nevr_match:
name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
if not version or not version.split('.')[0].isdigit():
return None
else:
return None
except AttributeError as e:
self.module.fail_json(
msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
rc=1,
results=[]
)
if not epoch:
epoch = "0"
if ':' in name:
epoch_name = name.split(":")
epoch = epoch_name[0]
name = ''.join(epoch_name[1:])
result = {
'name': name,
'epoch': epoch,
'release': release,
'version': version,
}
return result
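    # Editor's illustrative note (not in the original source):
    #   _packagename_dict('mypackage-0.1.2-3.el8') ->
    #       {'name': 'mypackage', 'epoch': '0', 'version': '0.1.2', 'release': '3.el8'}
    #   _packagename_dict('mypackage') -> None  (no version/release present)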
# Original implementation from yum.rpmUtils.miscutils (GPLv2+)
# http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
def _compare_evr(self, e1, v1, r1, e2, v2, r2):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
if e1 is None:
e1 = '0'
else:
e1 = str(e1)
v1 = str(v1)
r1 = str(r1)
if e2 is None:
e2 = '0'
else:
e2 = str(e2)
v2 = str(v2)
r2 = str(r2)
# print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
# print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
return rc
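    # Editor's illustrative note (not in the original source): labelCompare
    # compares numeric segments numerically, so for example
    #   self._compare_evr('0', '1.0', '2.el8', '0', '1.0', '10.el8') == -1
    # because release '2' sorts before '10' under rpm version rules.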
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
os.environ['LANGUAGE'] = os.environ['LANG'] = locale
global dnf
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
if HAS_DNF:
return
system_interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
'/usr/bin/python2',
'/usr/bin/python']
if not has_respawned():
# probe well-known system Python locations for accessible bindings, favoring py3
interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the dnf python module using {0} ({1}). "
"Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})"
.format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
results=[]
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/', sslverify=True):
"""Configure the dnf Base object."""
conf = base.conf
# Change the configuration file path if provided, this must be done before conf.read() is called
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file,
results=[],
)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
conf.localpkg_gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set certificate validation
conf.sslverify = sslverify
# Set installroot
conf.installroot = installroot
# Load substitutions from the filesystem
conf.substitutions.update_from_etc(installroot)
# Handle different DNF versions immutable mutable datatypes and
# dnf v1/v2/v3
#
# In DNF < 3.0 are lists, and modifying them works
# In DNF >= 3.0 < 3.6 are lists, but modifying them doesn't work
# In DNF >= 3.6 have been turned into tuples, to communicate that modifying them doesn't work
#
# https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
#
# Set excludes
if self.exclude:
_excludes = list(conf.exclude)
_excludes.extend(self.exclude)
conf.exclude = _excludes
# Set disable_excludes
if self.disable_excludes:
_disable_excludes = list(conf.disable_excludes)
if self.disable_excludes not in _disable_excludes:
_disable_excludes.append(self.disable_excludes)
conf.disable_excludes = _disable_excludes
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
if conf.substitutions.get('releasever') is None:
self.module.warn(
'Unable to detect release version (use "releasever" option to specify release version)'
)
# values of conf.substitutions are expected to be strings
# setting this to an empty string instead of None appears to mimic the DNF CLI behavior
conf.substitutions['releasever'] = ''
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
# Set best
if self.nobest:
conf.best = 0
if self.download_only:
conf.downloadonly = True
if self.download_dir:
conf.destdir = self.download_dir
if self.cacheonly:
conf.cacheonly = True
# Default in dnf upstream is true
conf.clean_requirements_on_remove = self.autoremove
# Default in dnf (and module default) is True
conf.install_weak_deps = self.install_weak_deps
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.enable()
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
try:
# this method has been supported in dnf-4.2.17-6 or later
# https://bugzilla.redhat.com/show_bug.cgi?id=1788212
base.setup_loggers()
except AttributeError:
pass
try:
base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
base.pre_configure_plugins()
except AttributeError:
pass # older versions of dnf didn't require this and don't have these methods
self._specify_repositories(base, disablerepo, enablerepo)
try:
base.configure_plugins()
except AttributeError:
pass # older versions of dnf didn't require this and don't have these methods
try:
if self.update_cache:
try:
base.update_cache()
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
base.fill_sack(load_system_repo='auto')
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
add_security_filters = getattr(base, "add_security_filters", None)
if callable(add_security_filters):
filters = {}
if self.bugfix:
filters.setdefault('types', []).append('bugfix')
if self.security:
filters.setdefault('types', []).append('security')
if filters:
add_security_filters('eq', **filters)
else:
filters = []
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
filters.append(base.sack.query().upgrades().filter(**key))
if self.security:
key = {'advisory_type__eq': 'security'}
filters.append(base.sack.query().upgrades().filter(**key))
if filters:
base._update_security_filters = filters
return base
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
installed = self.base.sack.query().installed()
package_spec = {}
name, arch = self._split_package_arch(pkg)
if arch:
package_spec['arch'] = arch
package_details = self._packagename_dict(pkg)
if package_details:
package_details['epoch'] = int(package_details['epoch'])
package_spec.update(package_details)
else:
package_spec['name'] = name
return bool(installed.filter(**package_spec))
def _is_newer_version_installed(self, pkg_name):
candidate_pkg = self._packagename_dict(pkg_name)
if not candidate_pkg:
# The user didn't provide a versioned rpm, so version checking is
# not required
return False
installed = self.base.sack.query().installed()
installed_pkg = installed.filter(name=candidate_pkg['name']).run()
if installed_pkg:
installed_pkg = installed_pkg[0]
# this looks weird but one is a dict and the other is a dnf.Package
evr_cmp = self._compare_evr(
installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
)
return evr_cmp == 1
else:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
is_installed = self._is_installed(pkg_spec)
try:
if is_newer_version_installed:
if self.allow_downgrade:
# dnf only does allow_downgrade, we have to handle this ourselves
# because it allows a possibility for non-idempotent transactions
# on a system's package set (pending the yum repo has many old
# NVRs indexed)
if upgrade:
if is_installed: # Case 1
# TODO: Is this case reachable?
#
# _is_installed() demands a name (*not* NVR) or else is always False
# (wildcards are treated literally).
#
# Meanwhile, _is_newer_version_installed() demands something versioned
# or else is always false.
#
# I fail to see how they can both be true at the same time for any
# given pkg_spec. -re
self.base.upgrade(pkg_spec)
else: # Case 2
self.base.install(pkg_spec, strict=self.base.conf.strict)
else: # Case 3
self.base.install(pkg_spec, strict=self.base.conf.strict)
else: # Case 4, Nothing to do, report back
pass
elif is_installed: # A potentially older (or same) version is installed
if upgrade: # Case 5
self.base.upgrade(pkg_spec)
else: # Case 6, Nothing to do, report back
pass
else: # Case 7, The package is not installed, simply install it
self.base.install(pkg_spec, strict=self.base.conf.strict)
return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
except dnf.exceptions.MarkingError as e:
return {
'failed': True,
'msg': "No package {0} available.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.DepsolveError as e:
return {
'failed': True,
'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
return {'failed': False, 'msg': '', 'failure': ''}
else:
return {
'failed': True,
'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
def _whatprovides(self, filepath):
self.base.read_all_repos()
available = self.base.sack.query().available()
# Search in file
files_filter = available.filter(file=filepath)
# And Search in provides
pkg_spec = files_filter.union(available.filter(provides=filepath)).run()
if pkg_spec:
return pkg_spec[0].name
def _parse_spec_group_file(self):
pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
already_loaded_comps = False # Only load this if necessary, it's slow
for name in self.names:
if '://' in name:
name = fetch_file(self.module, name)
filenames.append(name)
elif name.endswith(".rpm"):
filenames.append(name)
elif name.startswith('/'):
# like "dnf install /usr/bin/vi"
pkg_spec = self._whatprovides(name)
if pkg_spec:
pkg_specs.append(pkg_spec)
continue
elif name.startswith("@") or ('/' in name):
if not already_loaded_comps:
self.base.read_comps()
already_loaded_comps = True
grp_env_mdl_candidate = name[1:].strip()
if self.with_modules:
mdl = self.module_base._get_modules(grp_env_mdl_candidate)
if mdl[0]:
module_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, module_specs, filenames
def _update_only(self, pkgs):
not_installed = []
for pkg in pkgs:
if self._is_installed(pkg):
try:
if isinstance(to_text(pkg), text_type):
self.base.upgrade(pkg)
else:
self.base.package_upgrade(pkg)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
else:
not_installed.append(pkg)
return not_installed
def _install_remote_rpms(self, filenames):
if int(dnf.__version__.split(".")[0]) >= 2:
pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
else:
pkgs = []
try:
for filename in filenames:
pkgs.append(self.base.add_remote_rpm(filename))
except IOError as e:
if to_text("Can not load RPM file") in to_text(e):
self.module.fail_json(
msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
results=[],
rc=1,
)
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs:
try:
if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
if self.allow_downgrade:
self.base.package_install(pkg, strict=self.base.conf.strict)
else:
self.base.package_install(pkg, strict=self.base.conf.strict)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
def _is_module_installed(self, module_spec):
if self.with_modules:
module_spec = module_spec.strip()
module_list, nsv = self.module_base._get_modules(module_spec)
enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
if enabled_streams:
if nsv.stream:
if nsv.stream in enabled_streams:
return True # The provided stream was found
else:
return False # The provided stream was not found
else:
return True # No stream provided, but module found
return False # seems like a sane default
def ensure(self):
response = {
'msg': "",
'changed': False,
'results': [],
'rc': 0
}
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failure_response = {
'msg': "",
'failures': [],
'results': [],
'rc': 1
}
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run
if not self.names and self.autoremove:
self.names = []
self.state = 'absent'
if self.names == ['*'] and self.state == 'latest':
try:
self.base.upgrade_all()
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
self.module.fail_json(**failure_response)
else:
pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = self.base.comps.group_by_pattern(group_spec)
if group:
groups.append(group.id)
else:
environment = self.base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
self.module.fail_json(
msg="No group {0} available.".format(group_spec),
results=[],
)
if self.state in ['installed', 'present']:
# Install files.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Install modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if not self._is_module_installed(module):
response['results'].append("Module {0} installed.".format(module))
self.module_base.install([module])
self.module_base.enable([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
# Install groups.
for group in groups:
try:
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if module_specs and not self.with_modules:
# This means that the group or env wasn't found in comps
self.module.fail_json(
msg="No group {0} available.".format(module_specs[0]),
results=[],
)
# Install packages.
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Upgrade modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} upgraded.".format(module))
self.module_base.upgrade([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
try:
self.base.group_upgrade(group)
response['results'].append("Group {0} upgraded.".format(group))
except dnf.exceptions.CompsError:
if not self.update_only:
# If not already installed, try to install.
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
# Previously we forced base.conf.best=True here.
# However in 2.11+ there is a self.nobest option, so defer to that.
# Note, however, that just because nobest isn't set, doesn't mean that
# base.conf.best is actually true. We only force it false in
# _configure_base(), we never set it to true, and it can default to false.
# Thus, we still need to explicitly set it here.
self.base.conf.best = not self.nobest
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
else:
# state == absent
if filenames:
self.module.fail_json(
msg="Cannot remove paths -- please specify package name.",
results=[],
)
# Remove modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} removed.".format(module))
self.module_base.remove([module])
self.module_base.disable([module])
self.module_base.reset([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
self.base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
except AttributeError:
# Group either isn't installed or wasn't marked installed at install time
# because of DNF bug
#
# This is necessary until the upstream dnf API bug is fixed where installing
# a group via the dnf API doesn't actually mark the group as installed
# https://bugzilla.redhat.com/show_bug.cgi?id=1620324
pass
for environment in environments:
try:
self.base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
# short-circuit installed check for wildcard matching
if '*' in pkg_spec:
try:
self.base.remove(pkg_spec)
except dnf.exceptions.MarkingError as e:
is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
if is_failure:
failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
else:
response['results'].append(handled_remove_error)
continue
installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
sack=self.base.sack).installed().run()
for pkg in installed_pkg:
self.base.remove(str(pkg))
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
self.allowerasing = True
if self.autoremove:
self.base.autoremove()
try:
# NOTE for people who go down the rabbit hole of figuring out why
# resolve() throws DepsolveError here on dep conflict, but not when
# called from the CLI: It's controlled by conf.best. When best is
# set, Hawkey will fail the goal, and resolve() in dnf.base.Base
# will throw. Otherwise if it's not set, the update (install) will
# be (almost silently) removed from the goal, and Hawkey will report
# success. Note that in this case, similar to the CLI, skip_broken
# does nothing to help here, so we don't take it into account at
# all.
if not self.base.resolve(allow_erasing=self.allowerasing):
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Nothing to do"
self.module.exit_json(**response)
else:
response['changed'] = True
# If packages got installed/removed, add them to the results.
# We do this early so we can use it for both check_mode and not.
if self.download_only:
install_action = 'Downloaded'
else:
install_action = 'Installed'
for package in self.base.transaction.install_set:
response['results'].append("{0}: {1}".format(install_action, package))
for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
if self.module.check_mode:
response['msg'] = "Check mode: No changes made, but would have if not in check mode"
self.module.exit_json(**response)
try:
if self.download_only and self.download_dir and self.base.conf.destdir:
dnf.util.ensure_dir(self.base.conf.destdir)
self.base.repos.all().pkgdir = self.base.conf.destdir
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
self.module.fail_json(
msg="Failed to download packages: {0}".format(to_text(e)),
results=[],
)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
if not self.disable_gpg_check:
for package in self.base.transaction.install_set:
fail = False
gpgres, gpgerr = self.base._sig_check_pkg(package)
if gpgres == 0: # validated successfully
continue
elif gpgres == 1: # validation failed, install cert?
try:
self.base._get_key_for_package(package)
except dnf.exceptions.Error as e:
fail = True
else: # fatal error
fail = True
if fail:
msg = 'Failed to validate GPG signature for {0}: {1}'.format(package, gpgerr)
self.module.fail_json(msg)
if self.download_only:
# No further work left to do, and the results were already updated above.
# Just return them.
self.module.exit_json(**response)
else:
tid = self.base.do_transaction()
if tid is not None:
transaction = self.base.history.old([tid])[0]
if transaction.return_code:
failure_response['failures'].append(transaction.output())
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
self.module.exit_json(**response)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
response['changed'] = False
response['results'].append("Package already installed: {0}".format(to_native(e)))
self.module.exit_json(**response)
else:
failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
def run(self):
"""The main function."""
# Check if autoremove is called correctly
if self.autoremove:
if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
self.module.fail_json(
msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
results=[],
)
# Check if download_dir is called correctly
if self.download_dir:
if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
self.module.fail_json(
msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
results=[],
)
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
# Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.list_items(self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not self.download_only and not dnf.util.am_i_root():
self.module.fail_json(
msg="This command has to be run under the root user.",
results=[],
)
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
if self.with_modules:
self.module_base = dnf.module.module_base.ModuleBase(self.base)
self.ensure()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
# Extend yumdnf_argument_spec with dnf-specific features that will never be
# backported to yum because yum is now in "maintenance mode" upstream
yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool')
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.fail_json(
msg="Failed to synchronize repodata: {0}".format(to_native(de)),
rc=1,
results=[],
changed=False
)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,288 |
The version test can't handle ansible_version for an RC release
|
### Summary
When trying to compare `ansible_version.full` with a value using the [version test](https://docs.ansible.com/ansible/latest/user_guide/playbooks_tests.html#comparing-versions) for an RC release such as `2.13.2rc1` there is no good comparator to use:
* `strict` and `semver` consider this an invalid version number
* `loose` considers this a postrelease
I think Ansible should be able to correctly compare its own versioning scheme using the `strict` versioning, hence I'm filing this as a bug.
For reference, we're seeing this issue in https://github.com/DataDog/ansible-datadog/issues/445.
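One possible stopgap, an untested sketch that simply drops a trailing `rcN` suffix before the strict comparison:
```yaml
- name: "Test ansible version"
  command: "echo foo"
  when: ansible_version.full | regex_replace('rc[0-9]+$', '') is version("2.11", operator="lt", strict=True)
```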
### Issue Type
~Bug Report~
Feature Idea
### Component Name
lib/ansible/module_utils/compat/version.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2rc1]
config file = /Users/slavek.kabrda/programming/ansible.cfg
configured module search path = ['/Users/slavek.kabrda/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/slavek.kabrda/programming/ansible-venv-rc/lib/python3.9/site-packages/ansible
ansible collection location = /Users/slavek.kabrda/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/slavek.kabrda/programming/ansible-venv-rc/bin/ansible
python version = 3.9.13 (main, May 24 2022, 21:28:31) [Clang 13.1.6 (clang-1316.0.21.2)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
DEFAULT_CALLBACK_PLUGIN_PATH(/Users/slavek.kabrda/programming/ansible.cfg) = ['/Users/slavek.kabrda/.ansible/plugins/callback']
DEFAULT_HOST_LIST(/Users/slavek.kabrda/programming/ansible.cfg) = ['/Users/slavek.kabrda/hosts']
HOST_KEY_CHECKING(/Users/slavek.kabrda/programming/ansible.cfg) = False
CONNECTION:
==========
paramiko_ssh:
____________
host_key_checking(/Users/slavek.kabrda/programming/ansible.cfg) = False
ssh:
___
host_key_checking(/Users/slavek.kabrda/programming/ansible.cfg) = False
```
### OS / Environment
I think the only relevant environment is having an RC version of ansible-core, I have `2.13.2rc1`.
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- name: "Test ansible version"
command: "echo foo"
when: ansible_version.full is version("2.11", operator="lt", strict=True)
```
### Expected Results
Command passes.
### Actual Results
```console
The error was: Version comparison failed: invalid semantic version '2.13.2rc1'
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78288
|
https://github.com/ansible/ansible/pull/78308
|
9d4ced1237380051334b54379ff64e45c0341a6d
|
1429672213af648aef239138745d593e2920ebdd
| 2022-07-18T13:05:48Z |
python
| 2022-07-21T20:56:14Z |
changelogs/fragments/pep440-version-type.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,288 |
The version test can't handle ansible_version for an RC release
|
|
https://github.com/ansible/ansible/issues/78288
|
https://github.com/ansible/ansible/pull/78308
|
9d4ced1237380051334b54379ff64e45c0341a6d
|
1429672213af648aef239138745d593e2920ebdd
| 2022-07-18T13:05:48Z |
python
| 2022-07-21T20:56:14Z |
docs/docsite/rst/playbook_guide/playbooks_tests.rst
|
.. _playbooks_tests:
*****
Tests
*****
`Tests <https://jinja.palletsprojects.com/en/latest/templates/#tests>`_ in Jinja are a way of evaluating template expressions and returning True or False. Jinja ships with many of these. See `builtin tests`_ in the official Jinja template documentation.
The main difference between tests and filters is that Jinja tests are used for comparisons, whereas filters are used for data manipulation; the two have different applications in Jinja. Tests can also be used in list processing filters, like ``map()`` and ``select()``, to choose items in the list (a small example follows the syntax snippets below).
Like all templating, tests always execute on the Ansible controller, **not** on the target of a task, as they test local data.
In addition to those Jinja2 tests, Ansible supplies a few more, and users can easily create their own.
.. contents::
:local:
.. _test_syntax:
Test syntax
===========
`Test syntax <https://jinja.palletsprojects.com/en/latest/templates/#tests>`_ varies from `filter syntax <https://jinja.palletsprojects.com/en/latest/templates/#filters>`_ (``variable | filter``). Historically Ansible has registered tests as both jinja tests and jinja filters, allowing for them to be referenced using filter syntax.
As of Ansible 2.5, using a jinja test as a filter generates a deprecation warning; as of Ansible 2.9, jinja test syntax is required.
The syntax for using a jinja test is as follows
.. code-block:: console
variable is test_name
Such as
.. code-block:: console
result is failed
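Tests also plug into list-processing filters. A minimal sketch (the list values are illustrative) using the built-in ``number`` test with ``select()``
.. code-block:: yaml

    - debug:
        msg: "{{ [2, 'two', 2.0] | select('number') | list }}"  # -> [2, 2.0]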
.. _testing_strings:
Testing strings
===============
To match strings against a substring or a regular expression, use the ``match``, ``search`` or ``regex`` tests
.. code-block:: yaml
vars:
url: "https://example.com/users/foo/resources/bar"
tasks:
- debug:
msg: "matched pattern 1"
when: url is match("https://example.com/users/.*/resources")
- debug:
msg: "matched pattern 2"
when: url is search("users/.*/resources/.*")
- debug:
msg: "matched pattern 3"
when: url is search("users")
- debug:
msg: "matched pattern 4"
when: url is regex("example\.com/\w+/foo")
``match`` succeeds if it finds the pattern at the beginning of the string, while ``search`` succeeds if it finds the pattern anywhere within string. By default, ``regex`` works like ``search``, but ``regex`` can be configured to perform other tests as well, by passing the ``match_type`` keyword argument. In particular, ``match_type`` determines the ``re`` method that gets used to perform the search. The full list can be found in the relevant Python documentation `here <https://docs.python.org/3/library/re.html#regular-expression-objects>`_.
All of the string tests also take optional ``ignorecase`` and ``multiline`` arguments. These correspond to ``re.I`` and ``re.M`` from Python's ``re`` library, respectively.
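For example, a short sketch (the URL and patterns are illustrative) combining ``match_type`` with the flag arguments
.. code-block:: yaml

    vars:
      url: "https://example.com/users/foo/resources/bar"
    tasks:
      - debug:
          msg: "matched the whole string"
        when: url is regex("https://example\.com/users/.*/resources/.*", match_type="fullmatch")
      - debug:
          msg: "case-insensitive match"
        when: "'HELLO' is search('hello', ignorecase=true)"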
.. _testing_vault:
Vault
=====
.. versionadded:: 2.10
You can test whether a variable is an inline single vault encrypted value using the ``vault_encrypted`` test.
.. code-block:: yaml
vars:
variable: !vault |
$ANSIBLE_VAULT;1.2;AES256;dev
61323931353866666336306139373937316366366138656131323863373866376666353364373761
3539633234313836346435323766306164626134376564330a373530313635343535343133316133
36643666306434616266376434363239346433643238336464643566386135356334303736353136
6565633133366366360a326566323363363936613664616364623437336130623133343530333739
3039
tasks:
- debug:
msg: '{{ (variable is vault_encrypted) | ternary("Vault encrypted", "Not vault encrypted") }}'
.. _testing_truthiness:
Testing truthiness
==================
.. versionadded:: 2.10
As of Ansible 2.10, you can perform Python-like truthy and falsy checks.
.. code-block:: yaml
- debug:
msg: "Truthy"
when: value is truthy
vars:
value: "some string"
- debug:
msg: "Falsy"
when: value is falsy
vars:
value: ""
Additionally, the ``truthy`` and ``falsy`` tests accept an optional parameter called ``convert_bool`` that will attempt
to convert boolean indicators to actual booleans.
.. code-block:: yaml
- debug:
msg: "Truthy"
when: value is truthy(convert_bool=True)
vars:
value: "yes"
- debug:
msg: "Falsy"
when: value is falsy(convert_bool=True)
vars:
value: "off"
.. _testing_versions:
Comparing versions
==================
.. versionadded:: 1.6
.. note:: In 2.5 ``version_compare`` was renamed to ``version``
To compare a version number, such as checking whether ``ansible_facts['distribution_version']``
is greater than or equal to '12.04', you can use the ``version`` test. For example
.. code-block:: yaml+jinja
{{ ansible_facts['distribution_version'] is version('12.04', '>=') }}
If ``ansible_facts['distribution_version']`` is greater than or equal to 12.04, this test returns True, otherwise False.
The ``version`` test accepts the following operators
.. code-block:: console
<, lt, <=, le, >, gt, >=, ge, ==, =, eq, !=, <>, ne
This test also accepts a third parameter, ``strict``, which defines whether strict version parsing (as implemented by ``ansible.module_utils.compat.version.StrictVersion``) should be used. The default is ``False`` (which uses ``ansible.module_utils.compat.version.LooseVersion``); ``True`` enables strict version parsing
.. code-block:: yaml+jinja
{{ sample_version_var is version('1.0', operator='lt', strict=True) }}
As of Ansible 2.11 the ``version`` test accepts a ``version_type`` parameter which is mutually exclusive with ``strict``, and accepts the following values
.. code-block:: console
loose, strict, semver, semantic
To compare a semantic version using ``version_type``
.. code-block:: yaml+jinja
{{ sample_semver_var is version('2.0.0-rc.1+build.123', 'lt', version_type='semver') }}
When using ``version`` in a playbook or role, don't use ``{{ }}`` as described in the `FAQ <https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#when-should-i-use-also-how-to-interpolate-variables-or-dynamic-variable-names>`_
.. code-block:: yaml
vars:
my_version: 1.2.3
tasks:
- debug:
msg: "my_version is higher than 1.0.0"
when: my_version is version('1.0.0', '>')
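None of the types above handles ansible-core pre-release strings such as ``2.13.2rc1`` well (``strict`` and ``semver`` reject them, and ``loose`` treats the suffix as a post-release); the change tracked in ansible/ansible#78308 adds a ``pep440`` version type for that case. Assuming that type is available, the comparison would look like the following sketch
.. code-block:: yaml+jinja

    {{ '2.13.2rc1' is version('2.13.2', 'lt', version_type='pep440') }}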
.. _math_tests:
Set theory tests
================
.. versionadded:: 2.1
.. note:: In 2.5 ``issubset`` and ``issuperset`` were renamed to ``subset`` and ``superset``
To see if a list includes or is included by another list, you can use 'subset' and 'superset'
.. code-block:: yaml
vars:
a: [1,2,3,4,5]
b: [2,3]
tasks:
- debug:
msg: "A includes B"
when: a is superset(b)
- debug:
msg: "B is included in A"
when: b is subset(a)
.. _contains_test:
Testing if a list contains a value
==================================
.. versionadded:: 2.8
Ansible includes a ``contains`` test, which operates similarly to, but in reverse of, the Jinja2-provided ``in`` test.
The ``contains`` test is designed to work with the ``select``, ``reject``, ``selectattr``, and ``rejectattr`` filters
.. code-block:: yaml
vars:
lacp_groups:
- master: lacp0
network: 10.65.100.0/24
gateway: 10.65.100.1
dns4:
- 10.65.100.10
- 10.65.100.11
interfaces:
- em1
- em2
- master: lacp1
network: 10.65.120.0/24
gateway: 10.65.120.1
dns4:
- 10.65.100.10
- 10.65.100.11
interfaces:
- em3
- em4
tasks:
- debug:
msg: "{{ (lacp_groups|selectattr('interfaces', 'contains', 'em1')|first).master }}"
Testing if a list value is True
===============================
.. versionadded:: 2.4
You can use ``any`` and ``all`` to check whether any or all elements in a list are true
.. code-block:: yaml
vars:
mylist:
- 1
- "{{ 3 == 3 }}"
- True
myotherlist:
- False
- True
tasks:
- debug:
msg: "all are true!"
when: mylist is all
- debug:
msg: "at least one is true"
when: myotherlist is any
.. _path_tests:
Testing paths
=============
.. note:: In 2.5 the following tests were renamed to remove the ``is_`` prefix
The following tests can provide information about a path on the controller
.. code-block:: yaml
- debug:
msg: "path is a directory"
when: mypath is directory
- debug:
msg: "path is a file"
when: mypath is file
- debug:
msg: "path is a symlink"
when: mypath is link
- debug:
msg: "path already exists"
when: mypath is exists
- debug:
msg: "path is {{ (mypath is abs)|ternary('absolute','relative')}}"
- debug:
msg: "path is the same file as path2"
when: mypath is same_file(path2)
- debug:
msg: "path is a mount"
when: mypath is mount
- debug:
msg: "path is a directory"
when: mypath is directory
vars:
    mypath: /my/path
- debug:
msg: "path is a file"
when: "'/my/path' is file"
Testing size formats
====================
The ``human_readable`` and ``human_to_bytes`` filters let you check that your
playbooks use the right size format in your tasks: byte counts for
computers and human-readable strings for people.
Human readable
--------------
Converts a number of bytes (or bits) into a human-readable string.
For example
.. code-block:: yaml+jinja
- name: "Human Readable"
assert:
that:
- '"1.00 Bytes" == 1|human_readable'
- '"1.00 bits" == 1|human_readable(isbits=True)'
- '"10.00 KB" == 10240|human_readable'
- '"97.66 MB" == 102400000|human_readable'
- '"0.10 GB" == 102400000|human_readable(unit="G")'
- '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")'
This would result in
.. code-block:: json
{ "changed": false, "msg": "All assertions passed" }
Human to bytes
--------------
Converts a human-readable size string into a number of bytes.
For example
.. code-block:: yaml+jinja
- name: "Human to Bytes"
assert:
that:
- "{{'0'|human_to_bytes}} == 0"
- "{{'0.1'|human_to_bytes}} == 0"
- "{{'0.9'|human_to_bytes}} == 1"
- "{{'1'|human_to_bytes}} == 1"
- "{{'10.00 KB'|human_to_bytes}} == 10240"
- "{{ '11 MB'|human_to_bytes}} == 11534336"
- "{{ '1.1 GB'|human_to_bytes}} == 1181116006"
- "{{'10.00 Kb'|human_to_bytes(isbits=True)}} == 10240"
This would result in
.. code-block:: json
{ "changed": false, "msg": "All assertions passed" }
.. _test_task_results:
Testing task results
====================
The following tasks illustrate the tests that check the status of tasks
.. code-block:: yaml
tasks:
- shell: /usr/bin/foo
register: result
ignore_errors: True
- debug:
msg: "it failed"
when: result is failed
# in most cases you'll want a handler, but if you want to do something right now, this is nice
- debug:
msg: "it changed"
when: result is changed
- debug:
msg: "it succeeded in Ansible >= 2.1"
when: result is succeeded
- debug:
msg: "it succeeded"
when: result is success
- debug:
msg: "it was skipped"
when: result is skipped
.. note:: From 2.1, you can also use success, failure, change, and skip so that the grammar matches, for those who need to be strict about it.
.. _type_tests:
Type Tests
==========
When looking to determine types, it may be tempting to use the ``type_debug`` filter and compare that to the string name of that type; however, you should instead use type test comparisons, such as:
.. code-block:: yaml
tasks:
- name: "String interpretation"
vars:
a_string: "A string"
a_dictionary: {"a": "dictionary"}
a_list: ["a", "list"]
assert:
that:
# Note that a string is classed as also being "iterable", "sequence" and "mapping"
- a_string is string
# Note that a dictionary is classed as not being a "string", but is "iterable", "sequence" and "mapping"
- a_dictionary is not string and a_dictionary is mapping
# Note that a list is classed as not being a "string" or "mapping" but is "iterable" and "sequence"
- a_list is not string and a_list is not mapping and a_list is iterable
- name: "Number interpretation"
vars:
a_float: 1.01
a_float_as_string: "1.01"
an_integer: 1
an_integer_as_string: "1"
assert:
that:
# Both a_float and an_integer are "number", but each has their own type as well
- a_float is number and a_float is float
- an_integer is number and an_integer is integer
# Both a_float_as_string and an_integer_as_string are not numbers
- a_float_as_string is not number and a_float_as_string is string
- an_integer_as_string is not number and a_float_as_string is string
# a_float or a_float_as_string when cast to a float and then to a string should match the same value cast only to a string
- a_float | float | string == a_float | string
- a_float_as_string | float | string == a_float_as_string | string
# Likewise an_integer and an_integer_as_string when cast to an integer and then to a string should match the same value cast only to an integer
- an_integer | int | string == an_integer | string
- an_integer_as_string | int | string == an_integer_as_string | string
# However, a_float or a_float_as_string cast as an integer and then a string does not match the same value cast to a string
- a_float | int | string != a_float | string
- a_float_as_string | int | string != a_float_as_string | string
# Again, Likewise an_integer and an_integer_as_string cast as a float and then a string does not match the same value cast to a string
- an_integer | float | string != an_integer | string
- an_integer_as_string | float | string != an_integer_as_string | string
- name: "Native Boolean interpretation"
loop:
- yes
- true
- True
- TRUE
- no
- No
- NO
- false
- False
- FALSE
assert:
that:
# Note that while other values may be cast to boolean values, these are the only ones which are natively considered boolean
# Note also that `yes` is the only case sensitive variant of these values.
- item is boolean
.. _builtin tests: https://jinja.palletsprojects.com/en/latest/templates/#builtin-tests
.. seealso::
:ref:`playbooks_intro`
An introduction to playbooks
:ref:`playbooks_conditionals`
Conditional statements in playbooks
:ref:`playbooks_variables`
All about variables
:ref:`playbooks_loops`
Looping in playbooks
:ref:`playbooks_reuse_roles`
Playbook organization by roles
:ref:`tips_and_tricks`
Tips and tricks for playbooks
`User Mailing List <https://groups.google.com/group/ansible-devel>`_
Have a question? Stop by the google group!
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,288 |
The version test can't handle ansible_version for an RC release
|
|
https://github.com/ansible/ansible/issues/78288
|
https://github.com/ansible/ansible/pull/78308
|
9d4ced1237380051334b54379ff64e45c0341a6d
|
1429672213af648aef239138745d593e2920ebdd
| 2022-07-18T13:05:48Z |
python
| 2022-07-21T20:56:14Z |
lib/ansible/plugins/test/core.py
|
# (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import operator as py_operator
from collections.abc import MutableMapping, MutableSequence
from ansible.module_utils.compat.version import LooseVersion, StrictVersion
from ansible import errors
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.utils.display import Display
from ansible.utils.version import SemanticVersion
display = Display()
def failed(result):
''' Test if task result yields failed '''
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'failed' test expects a dictionary")
return result.get('failed', False)
def success(result):
''' Test if task result yields success '''
return not failed(result)
def unreachable(result):
''' Test if task result yields unreachable '''
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'unreachable' test expects a dictionary")
return result.get('unreachable', False)
def reachable(result):
''' Test if task result yields reachable '''
return not unreachable(result)
def changed(result):
''' Test if task result yields changed '''
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'changed' test expects a dictionary")
if 'changed' not in result:
changed = False
if (
'results' in result and # some modules return a 'results' key
isinstance(result['results'], MutableSequence) and
isinstance(result['results'][0], MutableMapping)
):
for res in result['results']:
if res.get('changed', False):
changed = True
break
else:
changed = result.get('changed', False)
return changed
def skipped(result):
''' Test if task result yields skipped '''
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'skipped' test expects a dictionary")
return result.get('skipped', False)
def started(result):
''' Test if async task has started '''
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'started' test expects a dictionary")
if 'started' in result:
# For async tasks, return status
# NOTE: The value of started is 0 or 1, not False or True :-/
return result.get('started', 0) == 1
else:
# For non-async tasks, warn user, but return as if started
display.warning("The 'started' test expects an async task, but a non-async task was tested")
return True
def finished(result):
''' Test if async task has finished '''
if not isinstance(result, MutableMapping):
raise errors.AnsibleFilterError("The 'finished' test expects a dictionary")
if 'finished' in result:
# For async tasks, return status
# NOTE: The value of finished is 0 or 1, not False or True :-/
return result.get('finished', 0) == 1
else:
# For non-async tasks, warn user, but return as if finished
display.warning("The 'finished' test expects an async task, but a non-async task was tested")
return True
def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
''' Expose `re` as a boolean filter using the `search` method by default.
This is likely only useful for `search` and `match` which already
have their own filters.
'''
# In addition to ensuring the correct type, to_text here will ensure
# _fail_with_undefined_error happens if the value is Undefined
value = to_text(value, errors='surrogate_or_strict')
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
    # Fall back to the compiled pattern's search method when match_type does
    # not name a real method; the previous fallback value, the string
    # 'search', was not callable.
    return bool(getattr(_re, match_type, _re.search)(value))
def vault_encrypted(value):
    """Evaluate whether a variable is a single vault encrypted value
.. versionadded:: 2.10
"""
return getattr(value, '__ENCRYPTED__', False) and value.is_encrypted()
def match(value, pattern='', ignorecase=False, multiline=False):
''' Perform a `re.match` returning a boolean '''
return regex(value, pattern, ignorecase, multiline, 'match')
def search(value, pattern='', ignorecase=False, multiline=False):
''' Perform a `re.search` returning a boolean '''
return regex(value, pattern, ignorecase, multiline, 'search')
def version_compare(value, version, operator='eq', strict=None, version_type=None):
''' Perform a version comparison on a value '''
op_map = {
'==': 'eq', '=': 'eq', 'eq': 'eq',
'<': 'lt', 'lt': 'lt',
'<=': 'le', 'le': 'le',
'>': 'gt', 'gt': 'gt',
'>=': 'ge', 'ge': 'ge',
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
}
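    # Pick the version-parsing class below: 'strict' and 'version_type' are
    # two mutually exclusive ways of selecting one (enforced further down).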
type_map = {
'loose': LooseVersion,
'strict': StrictVersion,
'semver': SemanticVersion,
'semantic': SemanticVersion,
}
if strict is not None and version_type is not None:
raise errors.AnsibleFilterError("Cannot specify both 'strict' and 'version_type'")
if not value:
raise errors.AnsibleFilterError("Input version value cannot be empty")
if not version:
raise errors.AnsibleFilterError("Version parameter to compare against cannot be empty")
Version = LooseVersion
if strict:
Version = StrictVersion
elif version_type:
try:
Version = type_map[version_type]
except KeyError:
raise errors.AnsibleFilterError(
"Invalid version type (%s). Must be one of %s" % (version_type, ', '.join(map(repr, type_map)))
)
if operator in op_map:
operator = op_map[operator]
else:
raise errors.AnsibleFilterError(
'Invalid operator type (%s). Must be one of %s' % (operator, ', '.join(map(repr, op_map)))
)
try:
method = getattr(py_operator, operator)
return method(Version(to_text(value)), Version(to_text(version)))
except Exception as e:
raise errors.AnsibleFilterError('Version comparison failed: %s' % to_native(e))
def truthy(value, convert_bool=False):
"""Evaluate as value for truthiness using python ``bool``
Optionally, attempt to do a conversion to bool from boolean like values
such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
.. versionadded:: 2.10
"""
if convert_bool:
try:
value = boolean(value)
except TypeError:
pass
return bool(value)
def falsy(value, convert_bool=False):
"""Evaluate as value for falsiness using python ``bool``
Optionally, attempt to do a conversion to bool from boolean like values
such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
.. versionadded:: 2.10
"""
return not truthy(value, convert_bool=convert_bool)
class TestModule(object):
''' Ansible core jinja2 tests '''
def tests(self):
return {
# failure testing
'failed': failed,
'failure': failed,
'succeeded': success,
'success': success,
'successful': success,
'reachable': reachable,
'unreachable': unreachable,
# changed testing
'changed': changed,
'change': changed,
# skip testing
'skipped': skipped,
'skip': skipped,
# async testing
'finished': finished,
'started': started,
# regex
'match': match,
'search': search,
'regex': regex,
# version comparison
'version_compare': version_compare,
'version': version_compare,
# lists
'any': any,
'all': all,
# truthiness
'truthy': truthy,
'falsy': falsy,
# vault
'vault_encrypted': vault_encrypted,
}
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,288 |
The version test can't handle ansible_version for an RC release
|
|
https://github.com/ansible/ansible/issues/78288
|
https://github.com/ansible/ansible/pull/78308
|
9d4ced1237380051334b54379ff64e45c0341a6d
|
1429672213af648aef239138745d593e2920ebdd
| 2022-07-18T13:05:48Z |
python
| 2022-07-21T20:56:14Z |
lib/ansible/plugins/test/version.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,288 |
The version test can't handle ansible_version for an RC release
|
|
https://github.com/ansible/ansible/issues/78288
|
https://github.com/ansible/ansible/pull/78308
|
9d4ced1237380051334b54379ff64e45c0341a6d
|
1429672213af648aef239138745d593e2920ebdd
| 2022-07-18T13:05:48Z |
python
| 2022-07-21T20:56:14Z |
lib/ansible/plugins/test/version_compare.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,288 |
The version test can't handle ansible_version for an RC release
|
|
https://github.com/ansible/ansible/issues/78288
|
https://github.com/ansible/ansible/pull/78308
|
9d4ced1237380051334b54379ff64e45c0341a6d
|
1429672213af648aef239138745d593e2920ebdd
| 2022-07-18T13:05:48Z |
python
| 2022-07-21T20:56:14Z |
test/integration/targets/test_core/tasks/main.yml
|
- name: Failure
set_fact:
hello: world
failed_when: true
ignore_errors: yes
register: intentional_failure
- name: Success
set_fact:
hello: world
register: intentional_success
- name: Try failure test on non-dictionary
set_fact:
hello: "{{ 'nope' is failure }}"
ignore_errors: yes
register: misuse_of_failure
- name: Assert failure tests work
assert:
that:
- intentional_failure is failed # old name
- intentional_failure is failure
- intentional_success is not failure
- misuse_of_failure is failed
- name: Assert successful tests work
assert:
that:
- intentional_success is succeeded # old name
- intentional_success is success # old name
- intentional_success is successful
- intentional_failure is not successful
- name: Try reachable host
command: id
register: reachable_host
- name: Try unreachable host
command: id
delegate_to: unreachable
ignore_unreachable: yes
ignore_errors: yes
register: unreachable_host
- name: Try reachable test on non-dictionary
set_fact:
hello: "{{ 'nope' is reachable }}"
ignore_errors: yes
register: misuse_of_reachable
- name: Assert reachable tests work
assert:
that:
- misuse_of_reachable is failed
- reachable_host is reachable
- unreachable_host is not reachable
- name: Try unreachable test on non-dictionary
set_fact:
hello: "{{ 'nope' is unreachable }}"
ignore_errors: yes
register: misuse_of_unreachable
- name: Assert unreachable tests work
assert:
that:
- misuse_of_unreachable is failed
- reachable_host is not unreachable
- unreachable_host is unreachable
- name: Make changes
file:
path: dir_for_changed
state: directory
register: directory_created
- name: Make no changes
file:
path: dir_for_changed
state: directory
register: directory_unchanged
- name: Try changed test on non-dictionary
set_fact:
hello: "{{ 'nope' is changed }}"
ignore_errors: yes
register: misuse_of_changed
# providing artificial task results since there are no modules in ansible-core that provide a 'results' list instead of 'changed'
- name: Prepare artificial task results
set_fact:
results_all_changed:
results:
- changed: true
- changed: true
results_some_changed:
results:
- changed: true
- changed: false
results_none_changed:
results:
- changed: false
- changed: false
results_missing_changed: {}
- name: Assert changed tests work
assert:
that:
- directory_created is changed
- directory_unchanged is not changed
- misuse_of_changed is failed
- results_all_changed is changed
- results_some_changed is changed
- results_none_changed is not changed
- results_missing_changed is not changed
- name: Skip me
set_fact:
hello: world
when: false
register: skipped_task
- name: Don't skip me
set_fact:
hello: world
register: executed_task
- name: Try skipped test on non-dictionary
set_fact:
hello: "{{ 'nope' is skipped }}"
ignore_errors: yes
register: misuse_of_skipped
- name: Assert skipped tests work
assert:
that:
- skipped_task is skipped
- executed_task is not skipped
- misuse_of_skipped is failure
- name: Not an async task
set_fact:
hello: world
register: non_async_task
- name: Complete an async task
command: id
async: 10
poll: 1
register: async_completed
- name: Start an async task without waiting for completion
shell: sleep 3
async: 10
poll: 0
register: async_incomplete
- name: Try finished test on non-dictionary
set_fact:
hello: "{{ 'nope' is finished }}"
ignore_errors: yes
register: misuse_of_finished
- name: Assert finished tests work (warning expected)
assert:
that:
- non_async_task is finished
- misuse_of_finished is failed
- async_completed is finished
- async_incomplete is not finished
- name: Try started test on non-dictionary
set_fact:
hello: "{{ 'nope' is started }}"
ignore_errors: yes
register: misuse_of_started
- name: Assert started tests work (warning expected)
assert:
that:
- non_async_task is started
- misuse_of_started is failed
- async_completed is started
- async_incomplete is started
- name: Assert match tests work
assert:
that:
- "'hello' is match('h.ll.')"
- "'hello' is not match('.ll.')"
- name: Assert search tests work
assert:
that:
- "'hello' is search('.l')"
- "'hello' is not search('nope')"
- name: Assert regex tests work
assert:
that:
- "'hello' is regex('.l')"
- "'hello' is regex('.L', ignorecase=true)"
- "'hello\nAnsible' is regex('^Ansible', multiline=true)"
- "'hello' is not regex('.L')"
- "'hello\nAnsible' is not regex('^Ansible')"
- name: Try version tests with bad operator
set_fact:
result: "{{ '1.0' is version('1.0', 'equals') }}"
ignore_errors: yes
register: version_bad_operator
- name: Try version tests with bad value
set_fact:
result: "{{ '1.0' is version('nope', '==', true) }}"
ignore_errors: yes
register: version_bad_value
- name: Try version with both strict and version_type
debug:
msg: "{{ '1.0' is version('1.0', strict=False, version_type='loose') }}"
ignore_errors: yes
register: version_strict_version_type
- name: Try version with bad version_type
debug:
msg: "{{ '1.0' is version('1.0', version_type='boom') }}"
ignore_errors: yes
register: version_bad_version_type
- name: Try version with bad semver
debug:
msg: "{{ 'nope' is version('nopenope', version_type='semver') }}"
ignore_errors: yes
register: version_bad_semver
- name: Try version with empty input value
debug:
msg: "{{ '' is version('1.0', '>') }}"
ignore_errors: yes
register: version_empty_input
- name: Try version with empty comparison value
debug:
msg: "{{ '1.0' is version('', '>') }}"
ignore_errors: yes
register: version_empty_comparison
- name: Try version with empty input and comparison values
debug:
msg: "{{ '' is version('', '>') }}"
ignore_errors: yes
register: version_empty_both
- name: Assert version tests work
assert:
that:
- "'1.0' is version_compare('1.0', '==')" # old name
- "'1.0' is version('1.0', '==')"
- "'1.0' is version('2.0', '!=')"
- "'1.0' is version('2.0', '<')"
- "'2.0' is version('1.0', '>')"
- "'1.0' is version('1.0', '<=')"
- "'1.0' is version('1.0', '>=')"
- "'1.0' is version_compare('1.0', '==', true)" # old name
- "'1.0' is version('1.0', '==', true)"
- "'1.0' is version('2.0', '!=', true)"
- "'1.0' is version('2.0', '<', true)"
- "'2.0' is version('1.0', '>', true)"
- "'1.0' is version('1.0', '<=', true)"
- "'1.0' is version('1.0', '>=', true)"
- "'1.2.3' is version('2.0.0', 'lt', version_type='semver')"
- version_bad_operator is failed
- version_bad_value is failed
- version_strict_version_type is failed
- version_bad_version_type is failed
- version_bad_semver is failed
- version_empty_input is failed
- version_empty_input is search('version value cannot be empty')
- version_empty_comparison is failed
- version_empty_comparison is search('to compare against cannot be empty')
- version_empty_both is failed
- version_empty_both is search('version value cannot be empty')
- name: Assert any tests work
assert:
that:
- "[true, false] is any"
- "[false] is not any"
- name: Assert all tests work
assert:
that:
- "[true] is all"
- "[true, false] is not all"
- name: Assert truthy tests work
assert:
that:
- '"string" is truthy'
- '"" is not truthy'
- True is truthy
- False is not truthy
- true is truthy
- false is not truthy
- 1 is truthy
- 0 is not truthy
- '[""] is truthy'
- '[] is not truthy'
- '"on" is truthy(convert_bool=True)'
- '"off" is not truthy(convert_bool=True)'
- '"fred" is truthy(convert_bool=True)'
- '{} is not truthy'
- '{"key": "value"} is truthy'
- name: Assert falsy tests work
assert:
that:
- '"string" is not falsy'
- '"" is falsy'
- True is not falsy
- False is falsy
- true is not falsy
- false is falsy
- 1 is not falsy
- 0 is falsy
- '[""] is not falsy'
- '[] is falsy'
- '"on" is not falsy(convert_bool=True)'
- '"off" is falsy(convert_bool=True)'
- '{} is falsy'
- '{"key": "value"} is not falsy'
- name: Create vaulted variable for vault_encrypted test
set_fact:
vaulted_value: !vault |
$ANSIBLE_VAULT;1.1;AES256
35323961353038346165643738646465376139363061353835303739663538343266303232326635
3365353662646236356665323135633630656238316530640a663362363763633436373439663031
33663433383037396438656464636433653837376361313638366362333037323961316364363363
3835616438623261650a636164376534376661393134326662326362323131373964313961623365
3833
- name: Assert vault_encrypted tests work
assert:
that:
- vaulted_value is vault_encrypted
- inventory_hostname is not vault_encrypted
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,911 |
ansible-galaxy fails to install collection from GalaxyNG when there are many versions
|
### Summary
With GalaxyNG installed, custom content published, and content synchronized from galaxy.ansible.com, installing the collections listed in a `requirements.yml` with a modern version of `ansible-galaxy` configured to use GalaxyNG fails when it encounters the `community.vmware` collection.
If you comment out `community.vmware` from `requirements.yml`, all other collections (custom published and synchronized) install fine.
If you uncomment `community.vmware` and use an older version of `ansible-galaxy` (e.g., Ansible 2.9.12), all collections install fine.
### Issue Type
Bug Report
### Component Name
ansible-galaxy
### Ansible Version
```console
$ ansible --version
# works (Ansible 2.9.12 installed via pip)
ansible 2.9.12
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-2.9.12/lib/python3.8/site-packages/ansible
executable location = /home/ben/venv3_ansible-2.9.12/bin/ansible
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
# fails (Ansible 4.2.0 installed via pip)
ansible [core 2.11.2]
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-4.2.0/lib/python3.8/site-packages/ansible
ansible collection location = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections:/home/ben/.ansible/collections:/usr/share/ansible/collections
executable location = /home/ben/venv3_ansible-4.2.0/bin/ansible
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
# fails (Ansible 5.8.0 installed via pip)
ansible [core 2.12.6]
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-5.8.0/lib/python3.8/site-packages/ansible
ansible collection location = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections:/home/ben/.ansible/collections:/usr/share/ansible/collections
executable location = /home/ben/venv3_ansible-5.8.0/bin/ansible
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed -t all
ANSIBLE_NOCOWS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = True
CACHE_PLUGIN(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = jsonfile
CACHE_PLUGIN_CONNECTION(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = $HOME/ansible/facts
CACHE_PLUGIN_TIMEOUT(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = 600
CALLBACKS_ENABLED(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['timer', 'profile_roles', 'profile_tasks', 'junit']
COLLECTIONS_PATHS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections', '/home/ben/.ansible/collections', '/usr/share/ansible/col>
DEFAULT_FORKS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = 20
DEFAULT_GATHERING(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = smart
DEFAULT_INVENTORY_PLUGIN_PATH(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/plugins/inventory', '/home/ben/.ansible/plugins/inventory',>
DEFAULT_LOAD_CALLBACK_PLUGINS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = True
DEFAULT_ROLES_PATH(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/roles', '/home/ben/.ansible/roles', '/usr/share/ansible/roles', '/etc/>
DEFAULT_STDOUT_CALLBACK(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = yaml
DEFAULT_VAULT_IDENTITY_LIST(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['vcenter@~/workspace/kiewit/ansible_vaults/vcenter', 'azure@~/workspace/kiewit/ansible_vaults/azure', 'ansible_use>
DEFAULT_VAULT_PASSWORD_FILE(env: ANSIBLE_VAULT_PASSWORD_FILE) = /home/ben/workspace/kiewit/ansible_vaults/molecule
GALAXY_SERVER_LIST(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['kiewit_published', 'kiewit_community']
HOST_KEY_CHECKING(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = False
INVENTORY_ENABLED(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml', 'vmware_vm_inventory', 'azure_rm', 'ldap_inventory']
RETRY_FILES_ENABLED(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = True
VARIABLE_PRECEDENCE(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['all_plugins_inventory', 'groups_plugins_inventory', 'all_inventory', 'groups_inventory', 'all_plugins_play', 'groups_plug>
```
### OS / Environment
Ubuntu 20.04, with Ansible installed via `pip` into a virtual environment.
### Steps to Reproduce
```yaml
---
# requirements.yml
collections:
- name: azure.azcollection
version: 1.7.0
- name: community.vmware
version: 1.17.0
- name: community.general
version: 4.5.0
- name: community.windows
version: 1.9.0
# custom published collection
- name: kiewit.content
version: 1.4.0
```
```bash
ansible-galaxy collection install -r collections/requirements.yml -p collections/ -vvvvv --force
```
GalaxyNG is installed, the custom collection is published, and the remote collections are synchronized to GalaxyNG via the following `requirements.yml`:
```yaml
---
collections:
- name: azure.azcollection
- name: community.vmware
- name: community.general
- name: community.windows
```
Notice there are 316 community.vmware versions:

### Expected Results
I expect all listed requirements to be forcibly re-installed.
### Actual Results
```console
#
# Failing with `ansible-galaxy` via Ansible 5.8.0
#
ansible-galaxy [core 2.12.6]
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-5.8.0/lib/python3.8/site-packages/ansible
ansible collection location = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections:/home/ben/.ansible/collections:/usr/share/ansible/collections
executable location = /home/ben/venv3_ansible-5.8.0/bin/ansible-galaxy
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
jinja version = 3.1.2
libyaml = True
Using /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg as config file
Reading requirement file at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/requirements.yml'
Starting galaxy collection install process
Found installed collection azure.azcollection:1.7.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/azure/azcollection'
Found installed collection ansible.windows:1.10.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/ansible/windows'
Found installed collection community.vmware:2.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/vmware'
Found installed collection community.general:4.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/general'
Found installed collection community.windows:1.9.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/windows'
Found installed collection kiewit.content:1.4.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/kiewit/content'
Process install dependency map
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Found API version 'v3' with Galaxy server kiewit_published (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/)
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/azure/azcollection/
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Found API version 'v3' with Galaxy server kiewit_community (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/)
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/azure/azcollection/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/vmware/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/vmware/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/vmware/versions/?limit=100
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/plugin/ansible/content/community/collections/index/community/vmware/versions/?limit=100&offset=100
[WARNING]: Skipping Galaxy server https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/. Got an unexpected error when getting available versions of collection community.vmware:
'/api/galaxy/content/community/v3/plugin/ansible/content/community/collections/index/community/vmware/versions/'
ERROR! Failed to resolve the requested dependencies map. Could not satisfy the following requirements:
* community.vmware:2.5.0 (direct request)
#
# Working via ansible-galaxy provided in Ansible 2.9.12
#
ansible-galaxy 2.9.12
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-2.9.12/lib/python3.8/site-packages/ansible
executable location = /home/ben/venv3_ansible-2.9.12/bin/ansible-galaxy
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
Using /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg as config file
Reading requirement file at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/requirements.yml'
Found installed collection azure.azcollection:1.7.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/azure/azcollection'
Found installed collection ansible.windows:1.10.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/ansible/windows'
Found installed collection community.vmware:2.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/vmware'
Found installed collection community.general:4.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/general'
Found installed collection community.windows:1.9.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/windows'
Found installed collection kiewit.content:1.4.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/kiewit/content'
Process install dependency map
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Found API version 'v3' with Galaxy server kiewit_published (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/)
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/azure/azcollection/versions/1.7.0/
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'azure.azcollection'
Collection requirement 'azure.azcollection' is the name of a collection
Collection 'azure.azcollection' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Found API version 'v3' with Galaxy server kiewit_community (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/)
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/azure/azcollection/versions/1.7.0/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/vmware/versions/2.5.0/
Collection 'azure.azcollection' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'community.vmware'
Collection requirement 'community.vmware' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/vmware/versions/2.5.0/
Collection 'community.vmware' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/general/versions/4.5.0/
Collection 'community.vmware' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'community.general'
Collection requirement 'community.general' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/general/versions/4.5.0/
Collection 'community.general' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/windows/versions/1.9.0/
Collection 'community.general' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'community.windows'
Collection requirement 'community.windows' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/windows/versions/1.9.0/
Collection 'community.windows' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/kiewit/content/versions/1.4.0/
Collection 'community.windows' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'kiewit.content'
Collection requirement 'kiewit.content' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/ansible/windows/versions/
Collection 'kiewit.content' obtained from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Processing requirement collection 'ansible.windows' - as dependency of community.windows
Collection requirement 'ansible.windows' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/plugin/ansible/content/community/collections/index/ansible/windows/versions/?limit=10&offset=10
Collection 'ansible.windows' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Starting collection install process
Installing 'azure.azcollection:1.7.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/azure/azcollection'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/azure-azcollection-1.7.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 28041b95da141c55e01cc491f004b64cd9a7c7d2d28e622825543309744f689a with expected hash 28041b95da141c55e01cc491f004b64cd9a7c7d2d28e622825543309744f689a
Installing 'community.vmware:2.5.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/vmware'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/community-vmware-2.5.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 4e3aaabdf4802af8ba9c08c77e464c1e0a52abcd7dec7d3aa51b1f865eadfe2a with expected hash 4e3aaabdf4802af8ba9c08c77e464c1e0a52abcd7dec7d3aa51b1f865eadfe2a
Installing 'community.general:4.5.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/general'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/community-general-4.5.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash c191817d1c19fef2ec2927d65dbec65f3f0bbba60523b95ec163a48ba39dbcf0 with expected hash c191817d1c19fef2ec2927d65dbec65f3f0bbba60523b95ec163a48ba39dbcf0
Installing 'community.windows:1.9.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/windows'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/community-windows-1.9.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 63909e16fc055e39266127098f7c11e9da5e6000b231be5bdac74c1758ef9e9b with expected hash 63909e16fc055e39266127098f7c11e9da5e6000b231be5bdac74c1758ef9e9b
Installing 'kiewit.content:1.4.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/kiewit/content'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/published/collections/artifacts/kiewit-content-1.4.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 5c72fee203e0e927ab7451cd0d7206c549f4280208f464ec0d5078711c70beda with expected hash 5c72fee203e0e927ab7451cd0d7206c549f4280208f464ec0d5078711c70beda
Skipping 'ansible.windows' as it is already installed
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77911
|
https://github.com/ansible/ansible/pull/78325
|
1429672213af648aef239138745d593e2920ebdd
|
5728d72cda94e24314a95befb0130b9e773f1035
| 2022-05-25T18:21:23Z |
python
| 2022-07-21T21:18:07Z |
changelogs/fragments/78325-ansible-galaxy-fix-caching-paginated-responses-from-v3-servers.yml
| |
lib/ansible/galaxy/api.py
|
# (C) 2013, James Cammarata <[email protected]>
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import datetime
import functools
import hashlib
import json
import os
import stat
import tarfile
import time
import threading
from urllib.error import HTTPError
from urllib.parse import quote as urlquote, urlencode, urlparse, parse_qs, urljoin
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.api import retry_with_delays_and_condition
from ansible.module_utils.api import generate_jittered_backoff
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url, prepare_multipart
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash_s
from ansible.utils.path import makedirs_safe
display = Display()
_CACHE_LOCK = threading.Lock()
COLLECTION_PAGE_SIZE = 100
RETRY_HTTP_ERROR_CODES = [ # TODO: Allow user-configuration
429, # Too Many Requests
520, # Galaxy rate limit error code (Cloudflare unknown error)
]
def cache_lock(func):
def wrapped(*args, **kwargs):
with _CACHE_LOCK:
return func(*args, **kwargs)
return wrapped
def is_rate_limit_exception(exception):
# Note: cloud.redhat.com masks rate limit errors with 403 (Forbidden) error codes.
# Since 403 could reflect the actual problem (such as an expired token), we should
# not retry by default.
return isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES
def g_connect(versions):
"""
Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the
endpoint.
:param versions: A list of API versions that the function supports.
"""
def decorator(method):
def wrapped(self, *args, **kwargs):
if not self._available_api_versions:
display.vvvv("Initial connection to galaxy_server: %s" % self.api_server)
# Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer
# auth for Automation Hub.
n_url = self.api_server
error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url)
if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/':
n_url = 'https://galaxy.ansible.com/api/'
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
# Either the URL doesn't exist or some other error occurred. Or the URL exists but isn't a galaxy API
# root (not JSON, no 'available_versions'), so try appending '/api/'
if n_url.endswith('/api') or n_url.endswith('/api/'):
raise
# Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
n_url = _urljoin(n_url, '/api/')
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except GalaxyError as new_err:
if new_err.http_code == 404:
raise err
raise
if 'available_versions' not in data:
raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available "
"on %s" % (n_url, self.api_server))
# Update api_server to point to the "real" API root, which in this case could have been the configured
# url + '/api/' appended.
self.api_server = n_url
# Default to only supporting v1, if only v1 is returned we also assume that v2 is available even though
# it isn't returned in the available_versions dict.
available_versions = data.get('available_versions', {u'v1': u'v1/'})
if list(available_versions.keys()) == [u'v1']:
available_versions[u'v2'] = u'v2/'
self._available_api_versions = available_versions
display.vvvv("Found API version '%s' with Galaxy server %s (%s)"
% (', '.join(available_versions.keys()), self.name, self.api_server))
# Verify that the API versions the function works with are available on the server specified.
available_versions = set(self._available_api_versions.keys())
common_versions = set(versions).intersection(available_versions)
if not common_versions:
raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s"
% (method.__name__, ", ".join(versions), ", ".join(available_versions),
self.name, self.api_server))
return method(self, *args, **kwargs)
return wrapped
return decorator
def get_cache_id(url):
""" Gets the cache ID for the URL specified. """
url_info = urlparse(url)
port = None
try:
port = url_info.port
except ValueError:
pass # While the URL is probably invalid, let the caller figure that out when using it
# Cannot use netloc because it could contain credentials if the server specified had them in there.
return '%s:%s' % (url_info.hostname, port or '')
@cache_lock
def _load_cache(b_cache_path):
""" Loads the cache file requested if possible. The file must not be world writable. """
cache_version = 1
if not os.path.isfile(b_cache_path):
display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path))
with open(b_cache_path, 'w'):
os.chmod(b_cache_path, 0o600)
cache_mode = os.stat(b_cache_path).st_mode
if cache_mode & stat.S_IWOTH:
display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source."
% to_text(b_cache_path))
return
with open(b_cache_path, mode='rb') as fd:
json_val = to_text(fd.read(), errors='surrogate_or_strict')
try:
cache = json.loads(json_val)
except ValueError:
cache = None
if not isinstance(cache, dict) or cache.get('version', None) != cache_version:
display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path))
cache = {'version': cache_version}
# Set the cache after we've cleared the existing entries
with open(b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict'))
return cache
def _urljoin(*args):
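# Join URL fragments with single slashes; empty fragments are dropped and the result
# has no trailing slash, e.g. _urljoin('https://host/api/', 'v3/', 'tokens')
# returns 'https://host/api/v3/tokens'.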
return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a)
class GalaxyError(AnsibleError):
""" Error for bad Galaxy server responses. """
def __init__(self, http_error, message):
super(GalaxyError, self).__init__(message)
self.http_code = http_error.code
self.url = http_error.geturl()
try:
http_msg = to_text(http_error.read())
err_info = json.loads(http_msg)
except (AttributeError, ValueError):
err_info = {}
url_split = self.url.split('/')
if 'v2' in url_split:
galaxy_msg = err_info.get('message', http_error.reason)
code = err_info.get('code', 'Unknown')
full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code)
elif 'v3' in url_split:
errors = err_info.get('errors', [])
if not errors:
errors = [{}] # Defaults are set below, we just need to make sure 1 error is present.
message_lines = []
for error in errors:
error_msg = error.get('detail') or error.get('title') or http_error.reason
error_code = error.get('code') or 'Unknown'
message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
message_lines.append(message_line)
full_error_msg = "%s %s" % (message, ', '.join(message_lines))
else:
# v1 and unknown API endpoints
galaxy_msg = err_info.get('default', http_error.reason)
full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)
self.message = to_native(full_error_msg)
# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return
# them in different formats.
CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str'])
class CollectionVersionMetadata:
def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies, signatures_url, signatures):
"""
Contains common information about a collection on a Galaxy server to smooth through API differences for
Collection and define a standard meta info for a collection.
:param namespace: The namespace name.
:param name: The collection name.
:param version: The version that the metadata refers to.
:param download_url: The URL to download the collection.
:param artifact_sha256: The SHA256 of the collection artifact for later verification.
:param dependencies: A dict of dependencies of the collection.
:param signatures_url: The URL to the specific version of the collection.
:param signatures: The list of signatures found at the signatures_url.
"""
self.namespace = namespace
self.name = name
self.version = version
self.download_url = download_url
self.artifact_sha256 = artifact_sha256
self.dependencies = dependencies
self.signatures_url = signatures_url
self.signatures = signatures
@functools.total_ordering
class GalaxyAPI:
""" This class is meant to be used as a API client for an Ansible Galaxy server """
def __init__(
self, galaxy, name, url,
username=None, password=None, token=None, validate_certs=True,
available_api_versions=None,
clear_response_cache=False, no_cache=True,
priority=float('inf'),
timeout=60,
):
self.galaxy = galaxy
self.name = name
self.username = username
self.password = password
self.token = token
self.api_server = url
self.validate_certs = validate_certs
self.timeout = timeout
self._available_api_versions = available_api_versions or {}
self._priority = priority
self._server_timeout = timeout
b_cache_dir = to_bytes(C.GALAXY_CACHE_DIR, errors='surrogate_or_strict')
makedirs_safe(b_cache_dir, mode=0o700)
self._b_cache_path = os.path.join(b_cache_dir, b'api.json')
if clear_response_cache:
with _CACHE_LOCK:
if os.path.exists(self._b_cache_path):
display.vvvv("Clearing cache file (%s)" % to_text(self._b_cache_path))
os.remove(self._b_cache_path)
self._cache = None
if not no_cache:
self._cache = _load_cache(self._b_cache_path)
display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
def __str__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as a native string representation."""
return to_native(self.name)
def __unicode__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as a unicode/text string representation."""
return to_text(self.name)
def __repr__(self):
# type: (GalaxyAPI) -> str
"""Render GalaxyAPI as an inspectable string representation."""
return (
'<{instance!s} "{name!s}" @ {url!s} with priority {priority!s}>'.
format(
instance=self, name=self.name,
priority=self._priority, url=self.api_server,
)
)
def __lt__(self, other_galaxy_api):
# type: (GalaxyAPI, GalaxyAPI) -> bool
"""Return whether the instance priority is higher than other."""
if not isinstance(other_galaxy_api, self.__class__):
return NotImplemented
return (
self._priority > other_galaxy_api._priority or
self.name < other_galaxy_api.name
)
@property # type: ignore[misc] # https://github.com/python/mypy/issues/1362
@g_connect(['v1', 'v2', 'v3'])
def available_api_versions(self):
# Calling g_connect will populate self._available_api_versions
return self._available_api_versions
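# Retry Galaxy calls that fail with a rate-limit style HTTP code (429/520) using
# jittered exponential backoff: up to 6 retries, with individual delays capped at 40s.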
@retry_with_delays_and_condition(
backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
should_retry_error=is_rate_limit_exception
)
def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None,
cache=False):
url_info = urlparse(url)
cache_id = get_cache_id(url)
query = parse_qs(url_info.query)
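# Responses are cached per server (hostname:port) and per URL path; the query
# string is only inspected below to detect pagination and is not part of the key.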
if cache and self._cache:
server_cache = self._cache.setdefault(cache_id, {})
iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ'
valid = False
if url_info.path in server_cache:
expires = datetime.datetime.strptime(server_cache[url_info.path]['expires'], iso_datetime_format)
valid = datetime.datetime.utcnow() < expires
is_paginated_url = 'page' in query or 'offset' in query
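# Follow-up pages (page/offset in the query) are never answered from the cache;
# a cache hit on the base path replays all accumulated results as one synthetic
# page with no 'next' link.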
if valid and not is_paginated_url:
# Got a hit on the cache and we aren't getting a paginated response
path_cache = server_cache[url_info.path]
if path_cache.get('paginated'):
if '/v3/' in url_info.path:
res = {'links': {'next': None}}
else:
res = {'next': None}
# Technically some v3 paginated APIs return results under 'data', but the caller checks both keys,
# so always replaying the cache under 'results' is fine.
res['results'] = []
for result in path_cache['results']:
res['results'].append(result)
else:
res = path_cache['results']
return res
elif not is_paginated_url:
# The cache entry had expired or does not exist, start a new blank entry to be filled later.
expires = datetime.datetime.utcnow()
expires += datetime.timedelta(days=1)
server_cache[url_info.path] = {
'expires': expires.strftime(iso_datetime_format),
'paginated': False,
}
headers = headers or {}
self._add_auth_token(headers, url, required=auth_required)
try:
display.vvvv("Calling Galaxy at %s" % url)
resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers,
method=method, timeout=self._server_timeout, http_agent=user_agent(), follow_redirects='safe')
except HTTPError as e:
raise GalaxyError(e, error_context_msg)
except Exception as e:
raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)))
resp_data = to_text(resp.read(), errors='surrogate_or_strict')
try:
data = json.loads(resp_data)
except ValueError:
raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
% (resp.url, to_native(resp_data)))
if cache and self._cache:
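# NOTE: no blank cache entry is created above for paginated URLs, so this lookup
# can raise KeyError when a follow-up page's path differs from the first page's
# path (the failure reported in #77911).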
path_cache = self._cache[cache_id][url_info.path]
# v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
paginated_key = None
for key in ['data', 'results']:
if key in data:
paginated_key = key
break
if paginated_key:
path_cache['paginated'] = True
results = path_cache.setdefault('results', [])
for result in data[paginated_key]:
results.append(result)
else:
path_cache['results'] = data
return data
def _add_auth_token(self, headers, url, token_type=None, required=False):
# Don't add the auth token if one is already present
if 'Authorization' in headers:
return
if not self.token and required:
raise AnsibleError("No access token or username set. A token can be set with --api-key "
"or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH)))
if self.token:
headers.update(self.token.headers())
@cache_lock
def _set_cache(self):
with open(self._b_cache_path, mode='wb') as fd:
fd.write(to_bytes(json.dumps(self._cache), errors='surrogate_or_strict'))
@g_connect(['v1'])
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/'
args = urlencode({"github_token": github_token})
try:
resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent(), timeout=self._server_timeout)
except HTTPError as e:
raise GalaxyError(e, 'Attempting to authenticate to galaxy')
except Exception as e:
raise AnsibleError('Unable to authenticate to galaxy: %s' % to_native(e), orig_exc=e)
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@g_connect(['v1'])
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/'
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
elif github_repo.startswith('ansible-role'):
args['alternate_role_name'] = github_repo[len('ansible-role') + 1:]
data = self._call_galaxy(url, args=urlencode(args), method="POST")
if data.get('results', None):
return data['results']
return data
@g_connect(['v1'])
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports")
if task_id is not None:
url = "%s?id=%d" % (url, task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self._call_galaxy(url)
return data['results']
@g_connect(['v1'])
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = to_text(urlquote(to_bytes(role_name)))
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except Exception:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles",
"?owner__username=%s&name=%s" % (user_name, role_name))
data = self._call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
@g_connect(['v1'])
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
results = []
try:
url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related,
"?page_size=50")
data = self._call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
# https://github.com/ansible/ansible/issues/64355
# api_server contains part of the API path but next_link includes the /api part so strip it out.
url_info = urlparse(self.api_server)
base_url = "%s://%s/" % (url_info.scheme, url_info.netloc)
while not done:
url = _urljoin(base_url, data['next_link'])
data = self._call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
except Exception as e:
display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s"
% (role_id, related, to_text(e)))
return results
@g_connect(['v1'])
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")
data = self._call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = _urljoin(self.api_server, data['next_link'])
data = self._call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error)))
@g_connect(['v1'])
def search_roles(self, search, **kwargs):
search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")
if search:
search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search)))
tags = kwargs.get('tags', None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self._call_galaxy(search_url)
return data
@g_connect(['v1'])
def add_secret(self, source, github_user, github_repo, secret):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/'
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self._call_galaxy(url, args=args, method="POST")
return data
@g_connect(['v1'])
def list_secrets(self):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets")
data = self._call_galaxy(url, auth_required=True)
return data
@g_connect(['v1'])
def remove_secret(self, secret_id):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/'
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
@g_connect(['v1'])
def delete_role(self, github_user, github_repo):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole",
"?github_user=%s&github_repo=%s" % (github_user, github_repo))
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
# Collection APIs #
@g_connect(['v2', 'v3'])
def publish_collection(self, collection_path):
"""
Publishes a collection to a Galaxy server and returns the import task URI.
:param collection_path: The path to the collection tarball to publish.
:return: The import task URI that contains the import results.
"""
display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server))
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path))
elif not tarfile.is_tarfile(b_collection_path):
raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection "
"build' to create a proper release artifact." % to_native(collection_path))
with open(b_collection_path, 'rb') as collection_tar:
sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256)
content_type, b_form_data = prepare_multipart(
{
'sha256': sha256,
'file': {
'filename': b_collection_path,
'mime_type': 'application/octet-stream',
},
}
)
headers = {
'Content-type': content_type,
'Content-length': len(b_form_data),
}
if 'v3' in self.available_api_versions:
n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/'
else:
n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/'
resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True,
error_context_msg='Error when publishing collection to %s (%s)'
% (self.name, self.api_server))
return resp['task']
@g_connect(['v2', 'v3'])
def wait_import_task(self, task_id, timeout=0):
"""
Waits until the import process on the Galaxy server has completed or the timeout is reached.
:param task_id: The id of the import task to wait for. This can be parsed out of the return
value for GalaxyAPI.publish_collection.
:param timeout: The timeout in seconds, 0 is no timeout.
"""
state = 'waiting'
data = None
# Construct the appropriate URL per version
if 'v3' in self.available_api_versions:
full_url = _urljoin(self.api_server, self.available_api_versions['v3'],
'imports/collections', task_id, '/')
else:
full_url = _urljoin(self.api_server, self.available_api_versions['v2'],
'collection-imports', task_id, '/')
display.display("Waiting until Galaxy import task %s has completed" % full_url)
start = time.time()
wait = 2
while timeout == 0 or (time.time() - start) < timeout:
try:
data = self._call_galaxy(full_url, method='GET', auth_required=True,
error_context_msg='Error when getting import task results at %s' % full_url)
except GalaxyError as e:
if e.http_code != 404:
raise
# The import job may not have started, and as such, the task url may not yet exist
display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait)
time.sleep(wait)
continue
state = data.get('state', 'waiting')
if data.get('finished_at', None):
break
display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again'
% (state, wait))
time.sleep(wait)
# poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
wait = min(30, wait * 1.5)
if state == 'waiting':
raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
% to_native(full_url))
for message in data.get('messages', []):
level = message['level']
if level.lower() == 'error':
display.error("Galaxy import error message: %s" % message['message'])
elif level.lower() == 'warning':
display.warning("Galaxy import warning message: %s" % message['message'])
else:
display.vvv("Galaxy import message: %s - %s" % (level, message['message']))
if state == 'failed':
code = to_native(data['error'].get('code', 'UNKNOWN'))
description = to_native(
data['error'].get('description', "Unknown error, see %s for more details" % full_url))
raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
@g_connect(['v2', 'v3'])
def get_collection_metadata(self, namespace, name):
"""
Gets the collection information from the Galaxy server about a specific Collection.
:param namespace: The collection namespace.
:param name: The collection name.
:return: CollectionMetadata about the collection.
"""
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
field_map = [
('created_str', 'created_at'),
('modified_str', 'updated_at'),
]
else:
api_path = self.available_api_versions['v2']
field_map = [
('created_str', 'created'),
('modified_str', 'modified'),
]
info_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, '/')
error_context_msg = 'Error when getting the collection info for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
data = self._call_galaxy(info_url, error_context_msg=error_context_msg)
metadata = {}
for name, api_field in field_map:
metadata[name] = data.get(api_field, None)
return CollectionMetadata(namespace, name, **metadata)
@g_connect(['v2', 'v3'])
def get_collection_version_metadata(self, namespace, name, version):
"""
Gets the collection information from the Galaxy server about a specific Collection version.
:param namespace: The collection namespace.
:param name: The collection name.
:param version: Version of the collection to get the information for.
:return: CollectionVersionMetadata about the collection at the version requested.
"""
api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
n_collection_url = _urljoin(*url_paths)
error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
% (namespace, name, version, self.name, self.api_server)
data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
self._set_cache()
signatures = data.get('signatures') or []
return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
data['download_url'], data['artifact']['sha256'],
data['metadata']['dependencies'], data['href'], signatures)
@g_connect(['v2', 'v3'])
def get_collection_versions(self, namespace, name):
"""
Gets a list of available versions for a collection on a Galaxy server.
:param namespace: The collection namespace.
:param name: The collection name.
:return: A list of versions that are available.
"""
relative_link = False
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
pagination_path = ['links', 'next']
relative_link = True  # AH pagination results are relative and not an absolute URI.
else:
api_path = self.available_api_versions['v2']
pagination_path = ['next']
page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size'
versions_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE))
versions_url_info = urlparse(versions_url)
# We should only rely on the cache if the collection has not changed. This may slow things down but it ensures
# we are not waiting a day before finding any new collections that have been published.
if self._cache:
server_cache = self._cache.setdefault(get_cache_id(versions_url), {})
modified_cache = server_cache.setdefault('modified', {})
try:
modified_date = self.get_collection_metadata(namespace, name).modified_str
except GalaxyError as err:
if err.http_code != 404:
raise
# No collection found, return an empty list to keep things consistent with the various APIs
return []
cached_modified_date = modified_cache.get('%s.%s' % (namespace, name), None)
if cached_modified_date != modified_date:
modified_cache['%s.%s' % (namespace, name)] = modified_date
if versions_url_info.path in server_cache:
del server_cache[versions_url_info.path]
self._set_cache()
error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
try:
data = self._call_galaxy(versions_url, error_context_msg=error_context_msg, cache=True)
except GalaxyError as err:
if err.http_code != 404:
raise
# v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
return []
if 'data' in data:
# v3 automation-hub is the only known API that uses `data`
# since v3 pulp_ansible does not, we cannot rely on version
# to indicate which key to use
results_key = 'data'
else:
results_key = 'results'
versions = []
while True:
versions += [v['version'] for v in data[results_key]]
next_link = data
for path in pagination_path:
next_link = next_link.get(path, {})
if not next_link:
break
elif relative_link:
# TODO: This assumes the pagination result is relative to the root server. Will need to be verified
# with someone who knows the AH API.
# Remove the query string from the versions_url to use the next_link's query
versions_url = urljoin(versions_url, urlparse(versions_url).path)
next_link = versions_url.replace(versions_url_info.path, next_link)
data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
error_context_msg=error_context_msg, cache=True)
self._set_cache()
return versions
@g_connect(['v2', 'v3'])
def get_collection_signatures(self, namespace, name, version):
"""
Gets the collection signatures from the Galaxy server about a specific Collection version.
:param namespace: The collection namespace.
:param name: The collection name.
:param version: Version of the collection to get the information for.
:return: A list of signature strings.
"""
api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
n_collection_url = _urljoin(*url_paths)
error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
% (namespace, name, version, self.name, self.api_server)
data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
self._set_cache()
try:
signatures = data["signatures"]
except KeyError:
# Noisy since this is used by the dep resolver, so require more verbosity than Galaxy calls
display.vvvvvv(f"Server {self.api_server} has not signed {namespace}.{name}:{version}")
return []
else:
return [signature_info["signature"] for signature_info in signatures]
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77911 |
ansible-galaxy fails to install collection from GalaxyNG when there are many versions
|
### Summary
Given a GalaxyNG install where I have published custom content and synchronized content from galaxy.ansible.com, when I try to install the collections listed in a `requirements.yml` using a modern version of `ansible-galaxy` configured to use GalaxyNG, `ansible-galaxy` fails when it encounters the `community.vmware` collection.
If you comment out `community.vmware` from `requirements.yml`, all other collections (custom published and synchronized) install fine.
If you uncomment `community.vmware` and use an older version of `ansible-galaxy` (e.g., Ansible 2.9.12), all collections install fine.
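The warning under "Actual Results" quotes a bare URL path, which looks like a `KeyError` coming out of the client-side response cache: that cache is keyed by URL path and only seeds an entry for the first, non-paginated request, so a second page served from a *different* path (the `/v3/plugin/...` link GalaxyNG returns once there are more than 100 versions) has no entry to hit. A stripped-down sketch of that interaction (hypothetical code, condensed from the real caching logic):

```python
from urllib.parse import urlparse

cache = {}  # simplified per-server response cache, keyed by URL path

def cached_call(url):
    path = urlparse(url).path
    if 'offset=' not in url and 'page=' not in url:
        # only the first, non-paginated request seeds a cache entry
        cache.setdefault(path, {'paginated': False, 'results': []})
    return cache[path]  # KeyError when a later page uses a different path

base = 'https://hub.example.com/api/galaxy/content/community/v3'
cached_call(base + '/collections/community/vmware/versions/?limit=100')   # fine
cached_call(base + '/plugin/ansible/content/community/collections/index/'
                   'community/vmware/versions/?limit=100&offset=100')     # KeyError
```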
### Issue Type
Bug Report
### Component Name
ansible-galaxy
### Ansible Version
```console
$ ansible --version
# works (Ansible 2.9.12 installed via pip)
ansible 2.9.12
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-2.9.12/lib/python3.8/site-packages/ansible
executable location = /home/ben/venv3_ansible-2.9.12/bin/ansible
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
# fails (Ansible 4.2.0 installed via pip)
ansible [core 2.11.2]
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-4.2.0/lib/python3.8/site-packages/ansible
ansible collection location = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections:/home/ben/.ansible/collections:/usr/share/ansible/collections
executable location = /home/ben/venv3_ansible-4.2.0/bin/ansible
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
# fails (Ansible 5.8.0 installed via pip)
ansible [core 2.12.6]
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-5.8.0/lib/python3.8/site-packages/ansible
ansible collection location = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections:/home/ben/.ansible/collections:/usr/share/ansible/collections
executable location = /home/ben/venv3_ansible-5.8.0/bin/ansible
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed -t all
ANSIBLE_NOCOWS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = True
CACHE_PLUGIN(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = jsonfile
CACHE_PLUGIN_CONNECTION(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = $HOME/ansible/facts
CACHE_PLUGIN_TIMEOUT(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = 600
CALLBACKS_ENABLED(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['timer', 'profile_roles', 'profile_tasks', 'junit']
COLLECTIONS_PATHS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections', '/home/ben/.ansible/collections', '/usr/share/ansible/col>
DEFAULT_FORKS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = 20
DEFAULT_GATHERING(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = smart
DEFAULT_INVENTORY_PLUGIN_PATH(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/plugins/inventory', '/home/ben/.ansible/plugins/inventory',>
DEFAULT_LOAD_CALLBACK_PLUGINS(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = True
DEFAULT_ROLES_PATH(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/roles', '/home/ben/.ansible/roles', '/usr/share/ansible/roles', '/etc/>
DEFAULT_STDOUT_CALLBACK(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = yaml
DEFAULT_VAULT_IDENTITY_LIST(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['vcenter@~/workspace/kiewit/ansible_vaults/vcenter', 'azure@~/workspace/kiewit/ansible_vaults/azure', 'ansible_use>
DEFAULT_VAULT_PASSWORD_FILE(env: ANSIBLE_VAULT_PASSWORD_FILE) = /home/ben/workspace/kiewit/ansible_vaults/molecule
GALAXY_SERVER_LIST(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['kiewit_published', 'kiewit_community']
HOST_KEY_CHECKING(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = False
INVENTORY_ENABLED(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml', 'vmware_vm_inventory', 'azure_rm', 'ldap_inventory']
RETRY_FILES_ENABLED(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = True
VARIABLE_PRECEDENCE(/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg) = ['all_plugins_inventory', 'groups_plugins_inventory', 'all_inventory', 'groups_inventory', 'all_plugins_play', 'groups_plug>
```
### OS / Environment
Ubuntu 20.04 running Ansible installed via `pip` into a virtual environment.
### Steps to Reproduce
```yaml
---
# requirements.yml
collections:
- name: azure.azcollection
version: 1.7.0
- name: community.vmware
version: 1.17.0
- name: community.general
version: 4.5.0
- name: community.windows
version: 1.9.0
# custom published collection
- name: kiewit.content
version: 1.4.0
```
```bash
ansible-galaxy collection install -r collections/requirements.yml -p collections/ -vvvvv --force
```
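For completeness, the same lookup can be driven outside the CLI by constructing `GalaxyAPI` directly, the way the unit tests do (a sketch; the server name and URL come from this report, and `<token>` is a placeholder):

```python
from ansible import context
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.token import GalaxyToken

# GalaxyAPI reads 'ignore_certs' from the global CLI args when initialised.
context.CLIARGS._store = {'ignore_certs': False}

api = GalaxyAPI(
    None, 'kiewit_community',
    'https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/',
    token=GalaxyToken('<token>'),
)

# With 316 published versions the server paginates at limit=100, so this call
# has to follow the paginated 'next' links and is where the failure shows up.
versions = api.get_collection_versions('community', 'vmware')
print(len(versions))
```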
GalaxyNG was installed, the custom collection published, and remote collections synchronized to GalaxyNG via the following `requirements.yml`:
```yaml
---
collections:
- name: azure.azcollection
- name: community.vmware
- name: community.general
- name: community.windows
```
Notice there are 316 `community.vmware` versions (screenshot of the GalaxyNG UI omitted).
### Expected Results
I expect all listed requirements to be re-installed, since the command passes `--force`.
### Actual Results
```console
#
# Failing with `ansible-galaxy` via Ansible 5.8.0
#
ansible-galaxy [core 2.12.6]
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-5.8.0/lib/python3.8/site-packages/ansible
ansible collection location = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections:/home/ben/.ansible/collections:/usr/share/ansible/collections
executable location = /home/ben/venv3_ansible-5.8.0/bin/ansible-galaxy
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
jinja version = 3.1.2
libyaml = True
Using /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg as config file
Reading requirement file at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/requirements.yml'
Starting galaxy collection install process
Found installed collection azure.azcollection:1.7.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/azure/azcollection'
Found installed collection ansible.windows:1.10.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/ansible/windows'
Found installed collection community.vmware:2.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/vmware'
Found installed collection community.general:4.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/general'
Found installed collection community.windows:1.9.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/windows'
Found installed collection kiewit.content:1.4.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/kiewit/content'
Process install dependency map
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Found API version 'v3' with Galaxy server kiewit_published (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/)
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/azure/azcollection/
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Found API version 'v3' with Galaxy server kiewit_community (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/)
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/azure/azcollection/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/vmware/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/vmware/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/vmware/versions/?limit=100
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/plugin/ansible/content/community/collections/index/community/vmware/versions/?limit=100&offset=100
[WARNING]: Skipping Galaxy server https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/. Got an unexpected error when getting available versions of collection community.vmware:
'/api/galaxy/content/community/v3/plugin/ansible/content/community/collections/index/community/vmware/versions/'
ERROR! Failed to resolve the requested dependencies map. Could not satisfy the following requirements:
* community.vmware:2.5.0 (direct request)
#
# Working via ansible-galaxy provided in Ansible 2.9.12
#
ansible-galaxy 2.9.12
config file = /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg
configured module search path = ['/home/ben/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ben/venv3_ansible-2.9.12/lib/python3.8/site-packages/ansible
executable location = /home/ben/venv3_ansible-2.9.12/bin/ansible-galaxy
python version = 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
Using /home/ben/workspace/kiewit/ansible/playbooks/ap_foo/ansible.cfg as config file
Reading requirement file at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/requirements.yml'
Found installed collection azure.azcollection:1.7.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/azure/azcollection'
Found installed collection ansible.windows:1.10.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/ansible/windows'
Found installed collection community.vmware:2.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/vmware'
Found installed collection community.general:4.5.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/general'
Found installed collection community.windows:1.9.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/windows'
Found installed collection kiewit.content:1.4.0 at '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/kiewit/content'
Process install dependency map
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Found API version 'v3' with Galaxy server kiewit_published (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/)
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/azure/azcollection/versions/1.7.0/
Initial connection to galaxy_server: https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Opened /home/ben/.ansible/galaxy_token
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'azure.azcollection'
Collection requirement 'azure.azcollection' is the name of a collection
Collection 'azure.azcollection' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Found API version 'v3' with Galaxy server kiewit_community (https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/)
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/azure/azcollection/versions/1.7.0/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/vmware/versions/2.5.0/
Collection 'azure.azcollection' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'community.vmware'
Collection requirement 'community.vmware' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/vmware/versions/2.5.0/
Collection 'community.vmware' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/general/versions/4.5.0/
Collection 'community.vmware' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'community.general'
Collection requirement 'community.general' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/general/versions/4.5.0/
Collection 'community.general' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/community/windows/versions/1.9.0/
Collection 'community.general' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'community.windows'
Collection requirement 'community.windows' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/community/windows/versions/1.9.0/
Collection 'community.windows' is not available from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/v3/collections/kiewit/content/versions/1.4.0/
Collection 'community.windows' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Processing requirement collection 'kiewit.content'
Collection requirement 'kiewit.content' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/collections/ansible/windows/versions/
Collection 'kiewit.content' obtained from server kiewit_published https://kneawxalp311.kiewitplaza.com/api/galaxy/content/published/
Processing requirement collection 'ansible.windows' - as dependency of community.windows
Collection requirement 'ansible.windows' is the name of a collection
Calling Galaxy at https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/v3/plugin/ansible/content/community/collections/index/ansible/windows/versions/?limit=10&offset=10
Collection 'ansible.windows' obtained from server kiewit_community https://kneawxalp311.kiewitplaza.com/api/galaxy/content/community/
Starting collection install process
Installing 'azure.azcollection:1.7.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/azure/azcollection'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/azure-azcollection-1.7.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 28041b95da141c55e01cc491f004b64cd9a7c7d2d28e622825543309744f689a with expected hash 28041b95da141c55e01cc491f004b64cd9a7c7d2d28e622825543309744f689a
Installing 'community.vmware:2.5.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/vmware'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/community-vmware-2.5.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 4e3aaabdf4802af8ba9c08c77e464c1e0a52abcd7dec7d3aa51b1f865eadfe2a with expected hash 4e3aaabdf4802af8ba9c08c77e464c1e0a52abcd7dec7d3aa51b1f865eadfe2a
Installing 'community.general:4.5.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/general'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/community-general-4.5.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash c191817d1c19fef2ec2927d65dbec65f3f0bbba60523b95ec163a48ba39dbcf0 with expected hash c191817d1c19fef2ec2927d65dbec65f3f0bbba60523b95ec163a48ba39dbcf0
Installing 'community.windows:1.9.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/community/windows'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/community/collections/artifacts/community-windows-1.9.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 63909e16fc055e39266127098f7c11e9da5e6000b231be5bdac74c1758ef9e9b with expected hash 63909e16fc055e39266127098f7c11e9da5e6000b231be5bdac74c1758ef9e9b
Installing 'kiewit.content:1.4.0' to '/home/ben/workspace/kiewit/ansible/playbooks/ap_foo/collections/ansible_collections/kiewit/content'
Downloading https://kneawxalp311.kiewitplaza.com/api/galaxy/v3/plugin/ansible/content/published/collections/artifacts/kiewit-content-1.4.0.tar.gz to /home/ben/.ansible/tmp/ansible-local-20811912wtbig7j/tmp7pjnjmnu
Validating downloaded file hash 5c72fee203e0e927ab7451cd0d7206c549f4280208f464ec0d5078711c70beda with expected hash 5c72fee203e0e927ab7451cd0d7206c549f4280208f464ec0d5078711c70beda
Skipping 'ansible.windows' as it is already installed
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77911
|
https://github.com/ansible/ansible/pull/78325
|
1429672213af648aef239138745d593e2920ebdd
|
5728d72cda94e24314a95befb0130b9e773f1035
| 2022-05-25T18:21:23Z |
python
| 2022-07-21T21:18:07Z |
test/units/galaxy/test_api.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import pytest
import stat
import tarfile
import tempfile
import time
from io import BytesIO, StringIO
from unittest.mock import MagicMock
import ansible.constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy import api as galaxy_api
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyAPI, GalaxyError
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.six.moves.urllib import error as urllib_error
from ansible.utils import context_objects as co
from ansible.utils.display import Display
@pytest.fixture(autouse='function')
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
# Required to initialise the GalaxyAPI object
context.CLIARGS._store = {'ignore_certs': False}
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(tmp_path_factory):
''' Creates a collection artifact tarball that is ready to be published '''
output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))
tar_path = os.path.join(output_dir, 'namespace-collection-v1.0.0.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(b"\x00\x01\x02\x03")
tar_info = tarfile.TarInfo('test')
tar_info.size = 4
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
yield tar_path
@pytest.fixture()
def cache_dir(tmp_path_factory, monkeypatch):
cache_dir = to_text(tmp_path_factory.mktemp('Test ÅÑŚÌβŁÈ Galaxy Cache'))
monkeypatch.setattr(C, 'GALAXY_CACHE_DIR', cache_dir)
yield cache_dir
def get_test_galaxy_api(url, version, token_ins=None, token_value=None, no_cache=True):
token_value = token_value or "my token"
token_ins = token_ins or GalaxyToken(token_value)
api = GalaxyAPI(None, "test", url, no_cache=no_cache)
# Warning: this doesn't test g_connect() because _available_api_versions is set here. That means
# that URLs for v2 servers have to append '/api/' themselves in the input data.
api._available_api_versions = {version: '%s' % version}
api.token = token_ins
return api
def get_collection_versions(namespace='namespace', name='collection'):
base_url = 'https://galaxy.server.com/api/v2/collections/{0}/{1}/'.format(namespace, name)
versions_url = base_url + 'versions/'
# Response for collection info
responses = [
{
"id": 1000,
"href": base_url,
"name": name,
"namespace": {
"id": 30000,
"href": "https://galaxy.ansible.com/api/v1/namespaces/30000/",
"name": namespace,
},
"versions_url": versions_url,
"latest_version": {
"version": "1.0.5",
"href": versions_url + "1.0.5/"
},
"deprecated": False,
"created": "2021-02-09T16:55:42.749915-05:00",
"modified": "2021-02-09T16:55:42.749915-05:00",
}
]
# Paginated responses for versions
page_versions = (('1.0.0', '1.0.1',), ('1.0.2', '1.0.3',), ('1.0.4', '1.0.5'),)
last_page = None
for page in range(1, len(page_versions) + 1):
if page < len(page_versions):
next_page = versions_url + '?page={0}'.format(page + 1)
else:
next_page = None
version_results = []
for version in page_versions[int(page - 1)]:
version_results.append(
{'version': version, 'href': versions_url + '{0}/'.format(version)}
)
responses.append(
{
'count': 6,
'next': next_page,
'previous': last_page,
'results': version_results,
}
)
last_page = page
return responses
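# For reference, the helper above returns four payloads in order: the collection
# detail document followed by three version pages; e.g. the first page is
#   {'count': 6, 'next': versions_url + '?page=2', 'previous': None,
#    'results': [{'version': '1.0.0', 'href': ...}, {'version': '1.0.1', 'href': ...}]}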
def test_api_no_auth():
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = {}
api._add_auth_token(actual, "")
assert actual == {}
def test_api_no_auth_but_required():
expected = "No access token or username set. A token can be set with --api-key or at "
with pytest.raises(AnsibleError, match=expected):
GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")._add_auth_token({}, "", required=True)
def test_api_token_auth():
token = GalaxyToken(token=u"my_token")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Token my_token'}
def test_api_token_auth_with_token_type(monkeypatch):
token = KeycloakToken(auth_url='https://api.test/')
mock_token_get = MagicMock()
mock_token_get.return_value = 'my_token'
monkeypatch.setattr(token, 'get', mock_token_get)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", token_type="Bearer", required=True)
assert actual == {'Authorization': 'Bearer my_token'}
def test_api_token_auth_with_v3_url(monkeypatch):
token = KeycloakToken(auth_url='https://api.test/')
mock_token_get = MagicMock()
mock_token_get.return_value = 'my_token'
monkeypatch.setattr(token, 'get', mock_token_get)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "https://galaxy.ansible.com/api/v3/resource/name", required=True)
assert actual == {'Authorization': 'Bearer my_token'}
def test_api_token_auth_with_v2_url():
token = GalaxyToken(token=u"my_token")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
# Add v3 to a random part of the URL; the auth logic should only match the v2 as a full URI path segment.
api._add_auth_token(actual, "https://galaxy.ansible.com/api/v2/resourcev3/name", required=True)
assert actual == {'Authorization': 'Token my_token'}
def test_api_basic_auth_password():
token = BasicAuthToken(username=u"user", password=u"pass")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Basic dXNlcjpwYXNz'}
def test_api_basic_auth_no_password():
token = BasicAuthToken(username=u"user")
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
actual = {}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Basic dXNlcjo='}
def test_api_dont_override_auth_header():
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = {'Authorization': 'Custom token'}
api._add_auth_token(actual, "", required=True)
assert actual == {'Authorization': 'Custom token'}
def test_initialise_galaxy(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"token":"my token"}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = api.authenticate("github_token")
assert len(api.available_api_versions) == 2
assert api.available_api_versions['v1'] == u'v1/'
assert api.available_api_versions['v2'] == u'v2/'
assert actual == {u'token': u'my token'}
assert mock_open.call_count == 2
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
def test_initialise_galaxy_with_auth(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"token":"my token"}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
actual = api.authenticate("github_token")
assert len(api.available_api_versions) == 2
assert api.available_api_versions['v1'] == u'v1/'
assert api.available_api_versions['v2'] == u'v2/'
assert actual == {u'token': u'my token'}
assert mock_open.call_count == 2
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
def test_initialise_automation_hub(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v2": "v2/", "v3":"v3/"}}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
token = KeycloakToken(auth_url='https://api.test/')
mock_token_get = MagicMock()
mock_token_get.return_value = 'my_token'
monkeypatch.setattr(token, 'get', mock_token_get)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
assert len(api.available_api_versions) == 2
assert api.available_api_versions['v2'] == u'v2/'
assert api.available_api_versions['v3'] == u'v3/'
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
assert mock_open.mock_calls[0][2]['headers'] == {'Authorization': 'Bearer my_token'}
def test_initialise_unknown(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
urllib_error.HTTPError('https://galaxy.ansible.com/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
urllib_error.HTTPError('https://galaxy.ansible.com/api/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
expected = "Error when finding available api versions from test (%s) (HTTP Code: 500, Message: msg)" \
% api.api_server
with pytest.raises(AnsibleError, match=re.escape(expected)):
api.authenticate("github_token")
def test_get_available_api_versions(monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/","v2":"v2/"}}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
actual = api.available_api_versions
assert len(actual) == 2
assert actual['v1'] == u'v1/'
assert actual['v2'] == u'v2/'
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
def test_publish_collection_missing_file():
fake_path = u'/fake/ÅÑŚÌβŁÈ/path'
expected = to_native("The collection path specified '%s' does not exist." % fake_path)
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
with pytest.raises(AnsibleError, match=expected):
api.publish_collection(fake_path)
def test_publish_collection_not_a_tarball():
expected = "The collection path specified '{0}' is not a tarball, use 'ansible-galaxy collection build' to " \
"create a proper release artifact."
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
with tempfile.NamedTemporaryFile(prefix=u'ÅÑŚÌβŁÈ') as temp_file:
temp_file.write(b"\x00")
temp_file.flush()
with pytest.raises(AnsibleError, match=expected.format(to_native(temp_file.name))):
api.publish_collection(temp_file.name)
def test_publish_collection_unsupported_version():
expected = "Galaxy action publish_collection requires API versions 'v2, v3' but only 'v1' are available on test " \
"https://galaxy.ansible.com/api/"
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v1")
with pytest.raises(AnsibleError, match=expected):
api.publish_collection("path")
@pytest.mark.parametrize('api_version, collection_url', [
('v2', 'collections'),
('v3', 'artifacts/collections'),
])
def test_publish_collection(api_version, collection_url, collection_artifact, monkeypatch):
api = get_test_galaxy_api("https://galaxy.ansible.com/api/", api_version)
mock_call = MagicMock()
mock_call.return_value = {'task': 'http://task.url/'}
monkeypatch.setattr(api, '_call_galaxy', mock_call)
actual = api.publish_collection(collection_artifact)
assert actual == 'http://task.url/'
assert mock_call.call_count == 1
assert mock_call.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/%s/%s/' % (api_version, collection_url)
assert mock_call.mock_calls[0][2]['headers']['Content-length'] == len(mock_call.mock_calls[0][2]['args'])
assert mock_call.mock_calls[0][2]['headers']['Content-type'].startswith(
'multipart/form-data; boundary=')
assert mock_call.mock_calls[0][2]['args'].startswith(b'--')
assert mock_call.mock_calls[0][2]['method'] == 'POST'
assert mock_call.mock_calls[0][2]['auth_required'] is True
@pytest.mark.parametrize('api_version, collection_url, response, expected', [
('v2', 'collections', {},
'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
('v2', 'collections', {
'message': u'Galaxy error messäge',
'code': 'GWE002',
}, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Galaxy error messäge Code: GWE002)'),
('v3', 'artifact/collections', {},
'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
('v3', 'artifact/collections', {
'errors': [
{
'code': 'conflict.collection_exists',
'detail': 'Collection "mynamespace-mycollection-4.1.1" already exists.',
'title': 'Conflict.',
'status': '400',
},
{
'code': 'quantum_improbability',
'title': u'Rändom(?) quantum improbability.',
'source': {'parameter': 'the_arrow_of_time'},
'meta': {'remediation': 'Try again before'},
},
],
}, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Collection '
u'"mynamespace-mycollection-4.1.1" already exists. Code: conflict.collection_exists), (HTTP Code: 500, '
u'Message: Rändom(?) quantum improbability. Code: quantum_improbability)')
])
def test_publish_failure(api_version, collection_url, response, expected, collection_artifact, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version)
expected_url = '%s/api/%s/%s' % (api.api_server, api_version, collection_url)
mock_open = MagicMock()
mock_open.side_effect = urllib_error.HTTPError(expected_url, 500, 'msg', {},
StringIO(to_text(json.dumps(response))))
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
with pytest.raises(GalaxyError, match=re.escape(to_native(expected % api.api_server))):
api.publish_collection(collection_artifact)
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234/'),
('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.return_value = StringIO(u'{"state":"success","finished_at":"time"}')
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
api.wait_import_task(import_uri)
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234/'),
('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_multiple_requests(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(u'{"state":"test"}'),
StringIO(u'{"state":"success","finished_at":"time"}'),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
monkeypatch.setattr(time, 'sleep', MagicMock())
api.wait_import_task(import_uri)
assert mock_open.call_count == 2
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[1][1][0] == full_import_uri
assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
assert mock_vvv.call_count == 1
assert mock_vvv.mock_calls[0][1][0] == \
'Galaxy import process has a status of test, wait 2 seconds before trying again'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri,', [
('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234/'),
('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_with_failure(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'finished_at': 'some_time',
'state': 'failed',
'error': {
'code': 'GW001',
'description': u'Becäuse I said so!',
},
'messages': [
{
'level': 'ERrOR',
'message': u'Somé error',
},
{
'level': 'WARNiNG',
'message': u'Some wärning',
},
{
'level': 'INFO',
'message': u'Somé info',
},
],
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
mock_warn = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_warn)
mock_err = MagicMock()
monkeypatch.setattr(Display, 'error', mock_err)
expected = to_native(u'Galaxy import process failed: Becäuse I said so! (Code: GW001)')
with pytest.raises(AnsibleError, match=re.escape(expected)):
api.wait_import_task(import_uri)
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
assert mock_vvv.call_count == 1
assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: INFO - Somé info'
assert mock_warn.call_count == 1
assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'
assert mock_err.call_count == 1
assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my_token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234/'),
('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_with_failure_no_error(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'finished_at': 'some_time',
'state': 'failed',
'error': {},
'messages': [
{
'level': 'ERROR',
'message': u'Somé error',
},
{
'level': 'WARNING',
'message': u'Some wärning',
},
{
'level': 'INFO',
'message': u'Somé info',
},
],
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
mock_warn = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_warn)
mock_err = MagicMock()
monkeypatch.setattr(Display, 'error', mock_err)
expected = 'Galaxy import process failed: Unknown error, see %s for more details \\(Code: UNKNOWN\\)' % full_import_uri
with pytest.raises(AnsibleError, match=expected):
api.wait_import_task(import_uri)
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
assert mock_vvv.call_count == 1
assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: INFO - Somé info'
assert mock_warn.call_count == 1
assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'
assert mock_err.call_count == 1
assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
'1234',
'https://galaxy.server.com/api/v2/collection-imports/1234/'),
('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
'1234',
'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_timeout(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
def return_response(*args, **kwargs):
return StringIO(u'{"state":"waiting"}')
mock_open = MagicMock()
mock_open.side_effect = return_response
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_vvv = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_vvv)
monkeypatch.setattr(time, 'sleep', MagicMock())
expected = "Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % full_import_uri
with pytest.raises(AnsibleError, match=expected):
api.wait_import_task(import_uri, 1)
assert mock_open.call_count > 1
assert mock_open.mock_calls[0][1][0] == full_import_uri
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[1][1][0] == full_import_uri
assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
# expected_wait_msg = 'Galaxy import process has a status of waiting, wait {0} seconds before trying again'
assert mock_vvv.call_count > 9 # 1st is opening Galaxy token file.
# FIXME:
# assert mock_vvv.mock_calls[1][1][0] == expected_wait_msg.format(2)
# assert mock_vvv.mock_calls[2][1][0] == expected_wait_msg.format(3)
# assert mock_vvv.mock_calls[3][1][0] == expected_wait_msg.format(4)
# assert mock_vvv.mock_calls[4][1][0] == expected_wait_msg.format(6)
# assert mock_vvv.mock_calls[5][1][0] == expected_wait_msg.format(10)
# assert mock_vvv.mock_calls[6][1][0] == expected_wait_msg.format(15)
# assert mock_vvv.mock_calls[7][1][0] == expected_wait_msg.format(22)
# assert mock_vvv.mock_calls[8][1][0] == expected_wait_msg.format(30)
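    # The commented values above trace the poll backoff schedule between retries
    # (2, 3, 4, 6, 10, 15, 22, 30 seconds), as recorded in the expected_wait_msg calls.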
@pytest.mark.parametrize('api_version, token_type, version, token_ins', [
('v2', None, 'v2.1.13', None),
('v3', 'Bearer', 'v1.0.0', KeycloakToken(auth_url='https://api.test/api/automation-hub/')),
])
def test_get_collection_version_metadata_no_version(api_version, token_type, version, token_ins, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'href': 'https://galaxy.server.com/api/{api}/namespace/name/versions/{version}/'.format(api=api_version, version=version),
'download_url': 'https://downloadme.com',
'artifact': {
'sha256': 'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f',
},
'namespace': {
'name': 'namespace',
},
'collection': {
'name': 'collection',
},
'version': version,
'metadata': {
'dependencies': {},
}
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_version_metadata('namespace', 'collection', version)
assert isinstance(actual, CollectionVersionMetadata)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.download_url == u'https://downloadme.com'
assert actual.artifact_sha256 == u'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f'
assert actual.version == version
assert actual.dependencies == {}
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s/' \
% (api.api_server, api_version, version)
    # v2 calls don't need auth, so no authz header or token_type
if token_type:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, version', [
('v2', None, None, '2.1.13'),
('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/api/automation-hub/'), '1.0.0'),
])
def test_get_collection_signatures_backwards_compat(api_version, token_type, token_ins, version, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO("{}")
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_signatures('namespace', 'collection', version)
assert actual == []
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s/' \
% (api.api_server, api_version, version)
    # v2 calls don't need auth, so no authz header or token_type
if token_type:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, version', [
('v2', None, None, '2.1.13'),
('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/api/automation-hub/'), '1.0.0'),
])
def test_get_collection_signatures(api_version, token_type, token_ins, version, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps({
'signatures': [
{
"signature": "-----BEGIN PGP SIGNATURE-----\nSIGNATURE1\n-----END PGP SIGNATURE-----\n",
"pubkey_fingerprint": "FINGERPRINT",
"signing_service": "ansible-default",
"pulp_created": "2022-01-14T14:05:53.835605Z",
},
{
"signature": "-----BEGIN PGP SIGNATURE-----\nSIGNATURE2\n-----END PGP SIGNATURE-----\n",
"pubkey_fingerprint": "FINGERPRINT",
"signing_service": "ansible-default",
"pulp_created": "2022-01-14T14:05:53.835605Z",
},
],
}))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_signatures('namespace', 'collection', version)
assert actual == [
"-----BEGIN PGP SIGNATURE-----\nSIGNATURE1\n-----END PGP SIGNATURE-----\n",
"-----BEGIN PGP SIGNATURE-----\nSIGNATURE2\n-----END PGP SIGNATURE-----\n"
]
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s/' \
% (api.api_server, api_version, version)
    # v2 calls don't need auth, so no authz header or token_type
if token_type:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, response', [
('v2', None, None, {
'count': 2,
'next': None,
'previous': None,
'results': [
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
},
],
}),
# TODO: Verify this once Automation Hub is actually out
('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), {
'count': 2,
'next': None,
'previous': None,
'data': [
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
},
],
}),
])
def test_get_collection_versions(api_version, token_type, token_ins, response, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [
StringIO(to_text(json.dumps(response))),
]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_versions('namespace', 'collection')
assert actual == [u'1.0.0', u'1.0.1']
page_query = '?limit=100' if api_version == 'v3' else '?page_size=100'
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions/%s' % (api_version, page_query)
if token_ins:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, responses', [
('v2', None, None, [
{
'count': 6,
'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2&page_size=100',
'previous': None,
'results': [ # Pay no mind, using more manageable results than page_size would indicate
{
'version': '1.0.0',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
},
],
},
{
'count': 6,
'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=3&page_size=100',
'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions',
'results': [
{
'version': '1.0.2',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.2',
},
{
'version': '1.0.3',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.3',
},
],
},
{
'count': 6,
'next': None,
'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2&page_size=100',
'results': [
{
'version': '1.0.4',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.4',
},
{
'version': '1.0.5',
'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.5',
},
],
},
]),
('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), [
{
'count': 6,
'links': {
# v3 links are relative and the limit is included during pagination
'next': '/api/v3/collections/namespace/collection/versions/?limit=100&offset=100',
'previous': None,
},
'data': [
{
'version': '1.0.0',
'href': '/api/v3/collections/namespace/collection/versions/1.0.0',
},
{
'version': '1.0.1',
'href': '/api/v3/collections/namespace/collection/versions/1.0.1',
},
],
},
{
'count': 6,
'links': {
'next': '/api/v3/collections/namespace/collection/versions/?limit=100&offset=200',
'previous': '/api/v3/collections/namespace/collection/versions',
},
'data': [
{
'version': '1.0.2',
'href': '/api/v3/collections/namespace/collection/versions/1.0.2',
},
{
'version': '1.0.3',
'href': '/api/v3/collections/namespace/collection/versions/1.0.3',
},
],
},
{
'count': 6,
'links': {
'next': None,
'previous': '/api/v3/collections/namespace/collection/versions/?limit=100&offset=100',
},
'data': [
{
'version': '1.0.4',
'href': '/api/v3/collections/namespace/collection/versions/1.0.4',
},
{
'version': '1.0.5',
'href': '/api/v3/collections/namespace/collection/versions/1.0.5',
},
],
},
]),
])
def test_get_collection_versions_pagination(api_version, token_type, token_ins, responses, monkeypatch):
api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
if token_ins:
mock_token_get = MagicMock()
mock_token_get.return_value = 'my token'
monkeypatch.setattr(token_ins, 'get', mock_token_get)
mock_open = MagicMock()
mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.get_collection_versions('namespace', 'collection')
assert actual == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
assert mock_open.call_count == 3
if api_version == 'v3':
query_1 = 'limit=100'
query_2 = 'limit=100&offset=100'
query_3 = 'limit=100&offset=200'
else:
query_1 = 'page_size=100'
query_2 = 'page=2&page_size=100'
query_3 = 'page=3&page_size=100'
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions/?%s' % (api_version, query_1)
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions/?%s' % (api_version, query_2)
assert mock_open.mock_calls[2][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
'versions/?%s' % (api_version, query_3)
if token_type:
assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
assert mock_open.mock_calls[2][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('responses', [
[
{
'count': 2,
            'results': [{'name': '3.5.1'}, {'name': '3.5.2'}],
'next_link': None,
'next': None,
'previous_link': None,
'previous': None
},
],
[
{
'count': 2,
'results': [{'name': '3.5.1'}],
'next_link': '/api/v1/roles/432/versions/?page=2&page_size=50',
'next': '/roles/432/versions/?page=2&page_size=50',
'previous_link': None,
'previous': None
},
{
'count': 2,
'results': [{'name': '3.5.2'}],
'next_link': None,
'next': None,
'previous_link': '/api/v1/roles/432/versions/?&page_size=50',
'previous': '/roles/432/versions/?page_size=50',
},
]
])
def test_get_role_versions_pagination(monkeypatch, responses):
api = get_test_galaxy_api('https://galaxy.com/api/', 'v1')
mock_open = MagicMock()
mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual = api.fetch_role_related('versions', 432)
assert actual == [{'name': '3.5.1'}, {'name': '3.5.2'}]
assert mock_open.call_count == len(responses)
assert mock_open.mock_calls[0][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page_size=50'
if len(responses) == 2:
assert mock_open.mock_calls[1][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page=2&page_size=50'
def test_missing_cache_dir(cache_dir):
os.rmdir(cache_dir)
GalaxyAPI(None, "test", 'https://galaxy.ansible.com/', no_cache=False)
assert os.path.isdir(cache_dir)
assert stat.S_IMODE(os.stat(cache_dir).st_mode) == 0o700
cache_file = os.path.join(cache_dir, 'api.json')
with open(cache_file) as fd:
actual_cache = fd.read()
assert actual_cache == '{"version": 1}'
assert stat.S_IMODE(os.stat(cache_file).st_mode) == 0o600
def test_existing_cache(cache_dir):
cache_file = os.path.join(cache_dir, 'api.json')
cache_file_contents = '{"version": 1, "test": "json"}'
with open(cache_file, mode='w') as fd:
fd.write(cache_file_contents)
os.chmod(cache_file, 0o655)
GalaxyAPI(None, "test", 'https://galaxy.ansible.com/', no_cache=False)
assert os.path.isdir(cache_dir)
with open(cache_file) as fd:
actual_cache = fd.read()
assert actual_cache == cache_file_contents
assert stat.S_IMODE(os.stat(cache_file).st_mode) == 0o655
@pytest.mark.parametrize('content', [
'',
'value',
'{"de" "finit" "ely" [\'invalid"]}',
'[]',
'{"version": 2, "test": "json"}',
'{"version": 2, "key": "ÅÑŚÌβŁÈ"}',
])
def test_cache_invalid_cache_content(content, cache_dir):
cache_file = os.path.join(cache_dir, 'api.json')
with open(cache_file, mode='w') as fd:
fd.write(content)
os.chmod(cache_file, 0o664)
GalaxyAPI(None, "test", 'https://galaxy.ansible.com/', no_cache=False)
with open(cache_file) as fd:
actual_cache = fd.read()
assert actual_cache == '{"version": 1}'
assert stat.S_IMODE(os.stat(cache_file).st_mode) == 0o664
def test_cache_complete_pagination(cache_dir, monkeypatch):
responses = get_collection_versions()
cache_file = os.path.join(cache_dir, 'api.json')
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(r)))
for r in responses
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual_versions = api.get_collection_versions('namespace', 'collection')
assert actual_versions == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
cached_server = final_cache['galaxy.server.com:']
cached_collection = cached_server['/api/v2/collections/namespace/collection/versions/']
cached_versions = [r['version'] for r in cached_collection['results']]
assert final_cache == api._cache
assert cached_versions == actual_versions
def test_cache_flaky_pagination(cache_dir, monkeypatch):
responses = get_collection_versions()
cache_file = os.path.join(cache_dir, 'api.json')
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
# First attempt, fail midway through
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(responses[0]))),
StringIO(to_text(json.dumps(responses[1]))),
urllib_error.HTTPError(responses[1]['next'], 500, 'Error', {}, StringIO()),
StringIO(to_text(json.dumps(responses[3]))),
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
expected = (
r'Error when getting available collection versions for namespace\.collection '
r'from test \(https://galaxy\.server\.com/api/\) '
r'\(HTTP Code: 500, Message: Error Code: Unknown\)'
)
with pytest.raises(GalaxyError, match=expected):
api.get_collection_versions('namespace', 'collection')
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
assert final_cache == {
'version': 1,
'galaxy.server.com:': {
'modified': {
'namespace.collection': responses[0]['modified']
}
}
}
# Reset API
api = get_test_galaxy_api('https://galaxy.server.com/api/', 'v2', no_cache=False)
# Second attempt is successful so cache should be populated
mock_open = MagicMock(
side_effect=[
StringIO(to_text(json.dumps(r)))
for r in responses
]
)
monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
actual_versions = api.get_collection_versions('namespace', 'collection')
assert actual_versions == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
with open(cache_file) as fd:
final_cache = json.loads(fd.read())
cached_server = final_cache['galaxy.server.com:']
cached_collection = cached_server['/api/v2/collections/namespace/collection/versions/']
cached_versions = [r['version'] for r in cached_collection['results']]
assert cached_versions == actual_versions
def test_world_writable_cache(cache_dir, monkeypatch):
mock_warning = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_warning)
cache_file = os.path.join(cache_dir, 'api.json')
with open(cache_file, mode='w') as fd:
fd.write('{"version": 2}')
os.chmod(cache_file, 0o666)
api = GalaxyAPI(None, "test", 'https://galaxy.ansible.com/', no_cache=False)
assert api._cache is None
with open(cache_file) as fd:
actual_cache = fd.read()
assert actual_cache == '{"version": 2}'
assert stat.S_IMODE(os.stat(cache_file).st_mode) == 0o666
assert mock_warning.call_count == 1
assert mock_warning.call_args[0][0] == \
'Galaxy cache has world writable access (%s), ignoring it as a cache source.' % cache_file
def test_no_cache(cache_dir):
cache_file = os.path.join(cache_dir, 'api.json')
with open(cache_file, mode='w') as fd:
fd.write('random')
api = GalaxyAPI(None, "test", 'https://galaxy.ansible.com/')
assert api._cache is None
with open(cache_file) as fd:
actual_cache = fd.read()
assert actual_cache == 'random'
def test_clear_cache_with_no_cache(cache_dir):
cache_file = os.path.join(cache_dir, 'api.json')
with open(cache_file, mode='w') as fd:
fd.write('{"version": 1, "key": "value"}')
GalaxyAPI(None, "test", 'https://galaxy.ansible.com/', clear_response_cache=True)
assert not os.path.exists(cache_file)
def test_clear_cache(cache_dir):
cache_file = os.path.join(cache_dir, 'api.json')
with open(cache_file, mode='w') as fd:
fd.write('{"version": 1, "key": "value"}')
GalaxyAPI(None, "test", 'https://galaxy.ansible.com/', clear_response_cache=True, no_cache=False)
with open(cache_file) as fd:
actual_cache = fd.read()
assert actual_cache == '{"version": 1}'
assert stat.S_IMODE(os.stat(cache_file).st_mode) == 0o600
@pytest.mark.parametrize(['url', 'expected'], [
('http://hostname/path', 'hostname:'),
('http://hostname:80/path', 'hostname:80'),
('https://testing.com:invalid', 'testing.com:'),
('https://testing.com:1234', 'testing.com:1234'),
('https://username:[email protected]/path', 'testing.com:'),
('https://username:[email protected]:443/path', 'testing.com:443'),
])
def test_cache_id(url, expected):
actual = galaxy_api.get_cache_id(url)
assert actual == expected
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,198 |
Examples in ansible-test sanity ignores are out-of-date
|
### Summary
https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/ignores.html#ignore-file-format examples here are not up-to-date, and especially with #67032, this is a bit difficult to get working. After looking around in the source code for a bit, we were able to figure out that `E105` is just not used anymore, but finding this was difficult.
### Issue Type
Documentation Report
### Component Name
https://github.com/ansible/ansible/blob/devel/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.1]
config file = None
configured module search path = ['/home/jcgruenhage/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.10/site-packages/ansible
ansible collection location = /home/jcgruenhage/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.10.5 (main, Jun 8 2022, 02:00:39) [GCC 10.2.1 20201203]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
Void Linux
### Additional Information
Improved documentation here would help people who need to ignore specific tests because they aren't applicable in their certain case, like the GPLv3 header for situations like internal modules, or even modules licensed under stricter licenses such as the AGPLv3 (which we do have in certain cases as well).
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78198
|
https://github.com/ansible/ansible/pull/78342
|
a3c90dd0bcb4aecfc64a4a584e52aec77ee61158
|
0f688f07af8d84cff71726eb855ee6ae5e6e352d
| 2022-07-04T10:39:03Z |
python
| 2022-07-27T16:33:35Z |
docs/docsite/rst/dev_guide/testing/sanity/ignores.rst
|
ignores
=======

Sanity tests for individual files can be skipped, and specific errors can be ignored.

When to Ignore Errors
---------------------

Sanity tests are designed to improve code quality and identify common issues with content.
When issues are identified during development, those issues should be corrected.

As development of Ansible continues, sanity tests are expanded to detect issues that previous releases could not.
To allow time for existing content to be updated to pass newer tests, ignore entries can be added.
New content should not use ignores for existing sanity tests.

When code is fixed to resolve sanity test errors, any relevant ignores must also be removed.
If the ignores are not removed, this will be reported as an unnecessary ignore error.
This is intended to prevent future regressions due to the same error recurring after being fixed.

When to Skip Tests
------------------

Although rare, there are reasons for skipping a sanity test instead of ignoring the errors it reports.

If a sanity test results in a traceback when processing content, that error cannot be ignored.
If this occurs, open a new `bug report <https://github.com/ansible/ansible/issues/new?template=bug_report.md>`_ for the issue so it can be fixed.
If the traceback occurs due to an issue with the content, that issue should be fixed.
If the content is correct, the test will need to be skipped until the bug in the sanity test is fixed.

Caution should be used when skipping sanity tests instead of ignoring them.
Since the test is skipped entirely, resolution of the issue will not be automatically detected.
This will prevent regression detection from working once the issue has been resolved.
For this reason it is a good idea to periodically review skipped entries manually to verify they are required.

Ignore File Location
--------------------

The location of the ignore file depends on the type of content being tested.

Ansible Collections
^^^^^^^^^^^^^^^^^^^

Since sanity tests change between Ansible releases, a separate ignore file is needed for each Ansible major release.

The filename is ``tests/sanity/ignore-X.Y.txt`` where ``X.Y`` is the Ansible release being used to test the collection.

Maintaining a separate file for each Ansible release allows a collection to pass tests for multiple versions of Ansible.

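For example, a collection tested against two Ansible releases might carry one ignore file per release (the version numbers shown here are illustrative)::

    tests/sanity/ignore-2.13.txt
    tests/sanity/ignore-2.14.txt
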
Ansible
^^^^^^^

When testing Ansible, all ignores are placed in the ``test/sanity/ignore.txt`` file.

Only a single file is needed because ``ansible-test`` is developed and released as a part of Ansible itself.

Ignore File Format
------------------

The ignore file contains one entry per line.
Each line consists of two columns, separated by a single space.
Comments may be added at the end of an entry, starting with a hash (``#``) character, which can be preceded by zero or more spaces.
Blank and comment-only lines are not allowed.

The first column specifies the file path that the entry applies to.
File paths must be relative to the root of the content being tested.
This is either the Ansible source or an Ansible collection.
File paths cannot contain a space or the hash (``#``) character.

The second column specifies the sanity test that the entry applies to.
This will be the name of the sanity test.
If the sanity test is specific to a version of Python, the name will include a dash (``-``) and the relevant Python version.
If the named test uses error codes then the error code to ignore must be appended to the name of the test, separated by a colon (``:``).

Below are some example ignore entries for an Ansible collection::

    roles/my_role/files/my_script.sh shellcheck:SC2154 # ignore undefined variable
    plugins/modules/my_module.py validate-modules:missing-gplv3-license # ignore license check
    plugins/modules/my_module.py import-3.8 # needs update to support collections.abc on Python 3.8+

It is also possible to skip a sanity test for a specific file.
This is done by adding ``!skip`` after the sanity test name in the second column.
When this is done, no error code is included, even if the sanity test uses error codes.

Below are some example skip entries for an Ansible collection::

    plugins/module_utils/my_util.py validate-modules!skip # waiting for bug fix in module validator
    plugins/lookup/my_plugin.py compile-2.6!skip # Python 2.6 is not supported on the controller

Ignore File Errors
------------------

There are various errors that can be reported for the ignore file itself:

- syntax errors parsing the ignore file
- references a file path that does not exist
- references to a sanity test that does not exist
- ignoring an error that does not occur
- ignoring a file which is skipped
- duplicate entries
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,371 |
ansible.builtin.pause always reports echo: true as a result
|
### Summary
I was looking into avoiding recording values prompted by the pause module in ara, and I realised that the echo return value is hardcoded to true, and always has been according to git history. The value is set to true in the result before the task arguments are actually checked:
https://github.com/ansible/ansible/blob/b86a18bd273499f5d10e581821a47571690660e1/lib/ansible/plugins/action/pause.py#L136-L149
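One possible shape of a fix (a sketch only, not necessarily what an eventual PR does) is to read the parsed argument before populating the result dict:

```python
# Sketch only: read the user-supplied value before reporting it.
echo = new_module_args['echo']
result.update(dict(
    changed=False,
    rc=0,
    stderr='',
    stdout='',
    start=None,
    stop=None,
    delta=None,
    echo=echo,  # now reflects echo=false when the task sets it
))
```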
### Issue Type
Bug Report
### Component Name
pause
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.7]
config file = /home/demarteaub/projects/infra/ansible/ansible.cfg
configured module search path = ['/home/demarteaub/projects/infra/ansible/vendors/library', '/home/demarteaub/projects/infra/ansible/library']
ansible python module location = /home/demarteaub/.local/share/virtualenvs/ansible-JeMCc-2q/lib/python3.8/site-packages/ansible
ansible collection location = /home/demarteaub/projects/infra/ansible/vendors/collections:/home/demarteaub/projects/infra/ansible/collections
executable location = /home/demarteaub/.local/share/virtualenvs/ansible-JeMCc-2q/bin/ansible
python version = 3.8.12 (default, May 10 2022, 23:46:40) [GCC 8.5.0 20210514 (Red Hat 8.5.0-10)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
ANSIBLE_PIPELINING(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
CALLBACKS_ENABLED(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['ansible.posix.profile_tasks', 'ara_default']
COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ignore
COLLECTIONS_PATHS(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/vendors/collections', '/home/demarteaub/projects/infra/ansible/collections']
DEFAULT_CALLBACK_PLUGIN_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/venv/lib/python3.8/site-packages/ara/plugins/callback', '/home/demarteaub/projects/infra/ansible/vendors/callbacks', '/home/demarteaub/projects/infra/ansible/library/callbacks']
DEFAULT_HOST_LIST(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/inventory/inventory.ini']
DEFAULT_MODULE_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/vendors/library', '/home/demarteaub/projects/infra/ansible/library']
DEFAULT_MODULE_UTILS_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/library/module_utils']
DEFAULT_ROLES_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/vendors/roles', '/home/demarteaub/projects/infra/ansible/roles']
DEFAULT_STDOUT_CALLBACK(/home/demarteaub/projects/infra/ansible/ansible.cfg) = yaml
DEFAULT_TIMEOUT(/home/demarteaub/projects/infra/ansible/ansible.cfg) = 5
DEFAULT_VAULT_ID_MATCH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = true
DIFF_ALWAYS(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
INTERPRETER_PYTHON(/home/demarteaub/projects/infra/ansible/ansible.cfg) = /usr/bin/python3
INVENTORY_UNPARSED_IS_FAILED(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
BECOME:
======
CACHE:
=====
CALLBACK:
========
CLICONF:
=======
CONNECTION:
==========
ssh:
___
pipelining(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
reconnection_retries(/home/demarteaub/projects/infra/ansible/ansible.cfg) = 1
ssh_args(/home/demarteaub/projects/infra/ansible/ansible.cfg) = -o PreferredAuthentications=publickey
ssh_executable(/home/demarteaub/projects/infra/ansible/ansible.cfg) = /usr/bin/ssh
timeout(/home/demarteaub/projects/infra/ansible/ansible.cfg) = 5
HTTPAPI:
=======
INVENTORY:
=========
LOOKUP:
======
NETCONF:
=======
SHELL:
=====
VARS:
====
```
### OS / Environment
RockyLinux 8
### Steps to Reproduce
```shell
$ ansible -i localhost, -m pause -a echo=false -a prompt=plz all
[WARNING]: Skipping callback plugin 'ara_default', unable to load
[pause]
plz:
no^M
```
### Expected Results
```shell
localhost | SUCCESS => {
"changed": false,
"delta": 0,
"echo": false,
"rc": 0,
"start": "2022-07-28 15:49:13.411063",
"stderr": "",
"stdout": "Paused for 0.01 minutes",
"stop": "2022-07-28 15:49:14.200542",
"user_input": "no"
}
```
### Actual Results
```console
localhost | SUCCESS => {
"changed": false,
"delta": 0,
"echo": true,
"rc": 0,
"start": "2022-07-28 15:49:13.411063",
"stderr": "",
"stdout": "Paused for 0.01 minutes",
"stop": "2022-07-28 15:49:14.200542",
"user_input": "no"
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78371
|
https://github.com/ansible/ansible/pull/78378
|
b0b58fa7c2a5ae4bda1b186d40ba8bf0f09f6d01
|
e4890afc4e2af6c5126d056a2b4fdb1f1f122cf6
| 2022-07-28T13:50:18Z |
python
| 2022-07-29T13:11:20Z |
changelogs/fragments/pause_echo_fix.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,371 |
ansible.builtin.pause always reports echo: true as a result
|
### Summary
I was looking into avoiding recording values prompted by the pause module in ara, and I realised that the echo return value is hardcoded to true, and always has been according to git history. The value is set to true in the result before the task arguments are actually checked:
https://github.com/ansible/ansible/blob/b86a18bd273499f5d10e581821a47571690660e1/lib/ansible/plugins/action/pause.py#L136-L149
### Issue Type
Bug Report
### Component Name
pause
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.7]
config file = /home/demarteaub/projects/infra/ansible/ansible.cfg
configured module search path = ['/home/demarteaub/projects/infra/ansible/vendors/library', '/home/demarteaub/projects/infra/ansible/library']
ansible python module location = /home/demarteaub/.local/share/virtualenvs/ansible-JeMCc-2q/lib/python3.8/site-packages/ansible
ansible collection location = /home/demarteaub/projects/infra/ansible/vendors/collections:/home/demarteaub/projects/infra/ansible/collections
executable location = /home/demarteaub/.local/share/virtualenvs/ansible-JeMCc-2q/bin/ansible
python version = 3.8.12 (default, May 10 2022, 23:46:40) [GCC 8.5.0 20210514 (Red Hat 8.5.0-10)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
ANSIBLE_PIPELINING(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
CALLBACKS_ENABLED(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['ansible.posix.profile_tasks', 'ara_default']
COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ignore
COLLECTIONS_PATHS(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/vendors/collections', '/home/demarteaub/projects/infra/ansible/collections']
DEFAULT_CALLBACK_PLUGIN_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/venv/lib/python3.8/site-packages/ara/plugins/callback', '/home/demarteaub/projects/infra/ansible/vendors/callbacks', '/home/demarteaub/projects/infra/ansible/library/callbacks']
DEFAULT_HOST_LIST(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/inventory/inventory.ini']
DEFAULT_MODULE_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/vendors/library', '/home/demarteaub/projects/infra/ansible/library']
DEFAULT_MODULE_UTILS_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/library/module_utils']
DEFAULT_ROLES_PATH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = ['/home/demarteaub/projects/infra/ansible/vendors/roles', '/home/demarteaub/projects/infra/ansible/roles']
DEFAULT_STDOUT_CALLBACK(/home/demarteaub/projects/infra/ansible/ansible.cfg) = yaml
DEFAULT_TIMEOUT(/home/demarteaub/projects/infra/ansible/ansible.cfg) = 5
DEFAULT_VAULT_ID_MATCH(/home/demarteaub/projects/infra/ansible/ansible.cfg) = true
DIFF_ALWAYS(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
INTERPRETER_PYTHON(/home/demarteaub/projects/infra/ansible/ansible.cfg) = /usr/bin/python3
INVENTORY_UNPARSED_IS_FAILED(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
BECOME:
======
CACHE:
=====
CALLBACK:
========
CLICONF:
=======
CONNECTION:
==========
ssh:
___
pipelining(/home/demarteaub/projects/infra/ansible/ansible.cfg) = True
reconnection_retries(/home/demarteaub/projects/infra/ansible/ansible.cfg) = 1
ssh_args(/home/demarteaub/projects/infra/ansible/ansible.cfg) = -o PreferredAuthentications=publickey
ssh_executable(/home/demarteaub/projects/infra/ansible/ansible.cfg) = /usr/bin/ssh
timeout(/home/demarteaub/projects/infra/ansible/ansible.cfg) = 5
HTTPAPI:
=======
INVENTORY:
=========
LOOKUP:
======
NETCONF:
=======
SHELL:
=====
VARS:
====
```
### OS / Environment
RockyLinux 8
### Steps to Reproduce
```shell
$ ansible -i localhost, -m pause -a echo=false -a prompt=plz all
[WARNING]: Skipping callback plugin 'ara_default', unable to load
[pause]
plz:
no^M
```
### Expected Results
```shell
localhost | SUCCESS => {
"changed": false,
"delta": 0,
"echo": false,
"rc": 0,
"start": "2022-07-28 15:49:13.411063",
"stderr": "",
"stdout": "Paused for 0.01 minutes",
"stop": "2022-07-28 15:49:14.200542",
"user_input": "no"
}
```
### Actual Results
```console
localhost | SUCCESS => {
"changed": false,
"delta": 0,
"echo": true,
"rc": 0,
"start": "2022-07-28 15:49:13.411063",
"stderr": "",
"stdout": "Paused for 0.01 minutes",
"stop": "2022-07-28 15:49:14.200542",
"user_input": "no"
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78371
|
https://github.com/ansible/ansible/pull/78378
|
b0b58fa7c2a5ae4bda1b186d40ba8bf0f09f6d01
|
e4890afc4e2af6c5126d056a2b4fdb1f1f122cf6
| 2022-07-28T13:50:18Z |
python
| 2022-07-29T13:11:20Z |
lib/ansible/plugins/action/pause.py
|
# Copyright 2012, Tim Bielawa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import sys
import termios
import time
import tty
from os import (
getpgrp,
isatty,
tcgetpgrp,
)
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
try:
import curses
import io
# Nest the try except since curses.error is not available if curses did not import
try:
curses.setupterm()
HAS_CURSES = True
except (curses.error, TypeError, io.UnsupportedOperation):
HAS_CURSES = False
except ImportError:
HAS_CURSES = False
MOVE_TO_BOL = b'\r'
CLEAR_TO_EOL = b'\x1b[K'
if HAS_CURSES:
# curses.tigetstr() returns None in some circumstances
MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL
def setraw(fd, when=termios.TCSAFLUSH):
"""Put terminal into a raw mode.
Copied from ``tty`` from CPython 3.11.0, and modified to not remove OPOST from OFLAG
OPOST is kept to prevent an issue with multi line prompts from being corrupted now that display
is proxied via the queue from forks. The problem is a race condition, in that we proxy the display
over the fork, but before it can be displayed, this plugin will have continued executing, potentially
setting stdout and stdin to raw which remove output post processing that commonly converts NL to CRLF
"""
mode = termios.tcgetattr(fd)
mode[tty.IFLAG] = mode[tty.IFLAG] & ~(termios.BRKINT | termios.ICRNL | termios.INPCK | termios.ISTRIP | termios.IXON)
# mode[tty.OFLAG] = mode[tty.OFLAG] & ~(termios.OPOST)
mode[tty.CFLAG] = mode[tty.CFLAG] & ~(termios.CSIZE | termios.PARENB)
mode[tty.CFLAG] = mode[tty.CFLAG] | termios.CS8
mode[tty.LFLAG] = mode[tty.LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
mode[tty.CC][termios.VMIN] = 1
mode[tty.CC][termios.VTIME] = 0
termios.tcsetattr(fd, when, mode)
class AnsibleTimeoutExceeded(Exception):
pass
def timeout_handler(signum, frame):
raise AnsibleTimeoutExceeded
def clear_line(stdout):
stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)
def is_interactive(fd=None):
if fd is None:
return False
if isatty(fd):
# Compare the current process group to the process group associated
# with terminal of the given file descriptor to determine if the process
# is running in the background.
return getpgrp() == tcgetpgrp(fd)
else:
return False
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=None):
''' run the pause action module '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
validation_result, new_module_args = self.validate_argument_spec(
argument_spec={
'echo': {'type': 'bool', 'default': True},
'minutes': {'type': int}, # Don't break backwards compat, allow floats, by using int callable
'seconds': {'type': int}, # Don't break backwards compat, allow floats, by using int callable
'prompt': {'type': 'str'},
},
mutually_exclusive=(
('minutes', 'seconds'),
),
)
duration_unit = 'minutes'
prompt = None
seconds = None
echo = True
echo_prompt = ''
result.update(dict(
changed=False,
rc=0,
stderr='',
stdout='',
start=None,
stop=None,
delta=None,
echo=echo
))
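        # NOTE: 'echo' here still holds the default True; the task-supplied value
        # is only read below, so the result always reports echo=true (see #78371).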
echo = new_module_args['echo']
# Add a note saying the output is hidden if echo is disabled
if not echo:
echo_prompt = ' (output is hidden)'
if new_module_args['prompt']:
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), new_module_args['prompt'], echo_prompt)
else:
# If no custom prompt is specified, set a default prompt
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
if new_module_args['minutes'] is not None:
seconds = new_module_args['minutes'] * 60
elif new_module_args['seconds'] is not None:
seconds = new_module_args['seconds']
duration_unit = 'seconds'
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = to_text(datetime.datetime.now())
result['user_input'] = b''
stdin_fd = None
old_settings = None
try:
if seconds is not None:
if seconds < 1:
seconds = 1
# setup the alarm handler
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(seconds)
# show the timer and control prompts
display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
# show the prompt specified in the task
if new_module_args['prompt']:
display.display(prompt)
else:
display.display(prompt)
# save the attributes on the existing (duped) stdin so
# that we can restore them later after we set raw mode
stdin_fd = None
stdout_fd = None
try:
stdin = self._connection._new_stdin.buffer
stdout = sys.stdout.buffer
stdin_fd = stdin.fileno()
stdout_fd = stdout.fileno()
except (ValueError, AttributeError):
# ValueError: someone is using a closed file descriptor as stdin
# AttributeError: someone is using a null file descriptor as stdin on windoze
stdin = None
interactive = is_interactive(stdin_fd)
if interactive:
# grab actual Ctrl+C sequence
try:
intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
except Exception:
# unsupported/not present, use default
intr = b'\x03' # value for Ctrl+C
# get backspace sequences
try:
backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
except Exception:
backspace = [b'\x7f', b'\x08']
old_settings = termios.tcgetattr(stdin_fd)
setraw(stdin_fd)
# Only set stdout to raw mode if it is a TTY. This is needed when redirecting
# stdout to a file since a file cannot be set to raw mode.
if isatty(stdout_fd):
setraw(stdout_fd)
# Only echo input if no timeout is specified
if not seconds and echo:
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] = new_settings[3] | termios.ECHO
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
# flush the buffer to make sure no previous key presses
# are read in below
termios.tcflush(stdin, termios.TCIFLUSH)
while True:
if not interactive:
if seconds is None:
display.warning("Not waiting for response to prompt as stdin is not interactive")
if seconds is not None:
# Give the signal handler enough time to timeout
time.sleep(seconds + 1)
break
try:
key_pressed = stdin.read(1)
if key_pressed == intr: # value for Ctrl+C
clear_line(stdout)
raise KeyboardInterrupt
if not seconds:
# read key presses and act accordingly
if key_pressed in (b'\r', b'\n'):
clear_line(stdout)
break
elif key_pressed in backspace:
# delete a character if backspace is pressed
result['user_input'] = result['user_input'][:-1]
clear_line(stdout)
if echo:
stdout.write(result['user_input'])
stdout.flush()
else:
result['user_input'] += key_pressed
except KeyboardInterrupt:
signal.alarm(0)
display.display("Press 'C' to continue the play or 'A' to abort \r"),
if self._c_or_a(stdin):
clear_line(stdout)
break
clear_line(stdout)
raise AnsibleError('user requested abort!')
except AnsibleTimeoutExceeded:
# this is the exception we expect when the alarm signal
# fires, so we simply ignore it to move into the cleanup
pass
finally:
# cleanup and save some information
# restore the old settings for the duped stdin stdin_fd
if not(None in (stdin_fd, old_settings)) and isatty(stdin_fd):
termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)
duration = time.time() - start
result['stop'] = to_text(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
return result
def _c_or_a(self, stdin):
while True:
key_pressed = stdin.read(1)
if key_pressed.lower() == b'a':
return False
elif key_pressed.lower() == b'c':
return True
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 70,180 |
Fatal error in ansible-galaxy collection list due to common SCM collection practice
|
##### SUMMARY
The command `ansible-galaxy collection list` should not _error_ when parsing a particular collection, because this prevents displaying a list of _all other_ collections.
This happens when simply using the source checkout of `community.vmware`.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
lib/ansible/cli/galaxy.py
##### ANSIBLE VERSION
```paste below
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
```
86606d3ca707e8836d2b956694bb55d3ae282aee
##### CONFIGURATION
```paste below
ansible-config dump --only-changed
```
(collection path modification in steps)
##### OS / ENVIRONMENT
N/A
##### STEPS TO REPRODUCE
```
base_dir=awx/plugins/collections/ansible_collections
mkdir -p $base_dir/community
git clone https://github.com/ansible-collections/vmware.git $base_dir/community/vmware
```
on the revision of this collection `2b277d44fa664443be679c13a66bdf2c60a75093`
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
```
##### EXPECTED RESULTS
lists all the collections in the folder `awx/plugins/collections`, and gives `*` for the version of `community.vmware` because I don't expect that maintainers are necessarily including the version in their `galaxy.yml`.
In this particular case, the file is:
```yaml
---
namespace: community
name: vmware
# the version key is generated during the release by Zuul
# https://github.com/ansible-network/releases/tree/master/ansible_releases/cmd
# A script based on https://pypi.org/project/pbr/ will generate the version
# key. The version value depends on the tag or the last git tag.
readme: README.md
authors:
- Ansible (https://github.com/ansible)
description:
license_file: LICENSE
tags:
- cloud
- vmware
- virtualization
dependencies: {}
```
Of course they're doing this, they're using tags and CI in their release process. As an example of a collection with _no_ `galaxy.yml`
```
# /Users/alancoding/.ansible/collections/ansible_collections
Collection Version
---------------------- -----------
chrismeyersfsu.hax0r *
```
It should show the same thing for the vmware collection source checkout
##### ACTUAL RESULTS
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible-galaxy 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible-galaxy
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
No config file found; using defaults
[DEPRECATION WARNING]: ANSIBLE_COLLECTIONS_PATHS option, all PATH-type options are singular PATH , use the "ANSIBLE_COLLECTIONS_PATH" environment variable instead. This feature
will be removed in version 2.14. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.
Searching /Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections for collections
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.1.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.3.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.tox' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/build_artifact/openstack-cloud-0.0.1-dev82.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.2.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/galaxy.yml' for collection build
Found installed collection alancoding.cloud:0.0.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/galaxy.yml' for collection build
Found installed collection amazon.aws:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/galaxy.yml' for collection build
Found installed collection azure.azcollection:0.1.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/galaxy.yml' for collection build
Found installed collection google.cloud:0.0.9 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/galaxy.yml' for collection build
Found installed collection ansible.posix:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/galaxy.yml' for collection build
Found installed collection ansible.amazon:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/galaxy.yml' for collection build
Found installed collection theforeman.foreman:0.7.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/galaxy.yml' for collection build
Found installed collection community.general:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general'
ERROR! The collection galaxy.yml at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/vmware/galaxy.yml' is missing the following mandatory keys: version
```
It's bad that the command terminates so fragilely. This also just doesn't jibe with the common, published policies of first-class collections.
It's also bad that it won't give a traceback for the error that happened.
FYI to @Akasurde (I'd expect no change in vmware collection) @jborean93 (most likely expect change in CLI error handling)
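A rough sketch of the per-collection error tolerance this implies (the `load_collection` callable below is illustrative, standing in for whatever parses a collection directory; it is not the actual `ansible-galaxy` internals):
```python
# Illustrative sketch only, not the real ansible-galaxy code path.
from ansible.errors import AnsibleError
from ansible.utils.display import Display

display = Display()


def list_collections_tolerantly(collection_dirs, load_collection):
    """Yield (fqcn, version) pairs, warning instead of aborting on bad metadata.

    ``load_collection`` stands in for whatever parses a collection directory
    and may raise AnsibleError (for example, a galaxy.yml missing 'version').
    """
    for b_path in collection_dirs:
        try:
            collection = load_collection(b_path)
        except AnsibleError as err:
            # Keep going: report the broken collection instead of dying.
            display.warning("unable to parse collection at '%s': %s" % (b_path, err))
            continue
        yield collection.fqcn, collection.ver or '*'
```
With handling along these lines, the broken `community.vmware` checkout would surface as a warning (or a `*` version) while every other collection still gets listed.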
|
https://github.com/ansible/ansible/issues/70180
|
https://github.com/ansible/ansible/pull/76596
|
9b79d6ba3582bb0ef339738b7e1ade879d00dfe3
|
05608b20e8f875d51866a184f8c579fe60498e05
| 2020-06-19T19:09:14Z |
python
| 2022-08-02T15:46:47Z |
changelogs/fragments/70180-collection-list-more-robust.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 70,180 |
Fatal error in ansible-galaxy collection list due to common SCM collection practice
|
|
https://github.com/ansible/ansible/issues/70180
|
https://github.com/ansible/ansible/pull/76596
|
9b79d6ba3582bb0ef339738b7e1ade879d00dfe3
|
05608b20e8f875d51866a184f8c579fe60498e05
| 2020-06-19T19:09:14Z |
python
| 2022-08-02T15:46:47Z |
lib/ansible/cli/galaxy.py
|
#!/usr/bin/env python
# Copyright: (c) 2013, James Cammarata <[email protected]>
# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import json
import os.path
import re
import shutil
import sys
import textwrap
import time
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections,
SIGNATURE_COUNT_RE,
)
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.collection.gpg import GPG_ERROR_MAP
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
# config definition by position: name, required, type
SERVER_DEF = [
('url', True, 'str'),
('username', False, 'str'),
('password', False, 'str'),
('token', False, 'str'),
('auth_url', False, 'str'),
('v3', False, 'bool'),
('validate_certs', False, 'bool'),
('client_id', False, 'str'),
('timeout', False, 'int'),
]
# config definition fields
SERVER_ADDITIONAL = {
'v3': {'default': 'False'},
'validate_certs': {'default': True, 'cli': [{'name': 'validate_certs'}]},
'timeout': {'default': '60', 'cli': [{'name': 'timeout'}]},
'token': {'default': None},
}
# override default if the generic is set
if C.GALAXY_IGNORE_CERTS is not None:
SERVER_ADDITIONAL['validate_certs'].update({'default': not C.GALAXY_IGNORE_CERTS})
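# Illustrative ansible.cfg wiring for the definitions above (values are
# hypothetical):
#
#   [galaxy]
#   server_list = release_galaxy
#
#   [galaxy_server.release_galaxy]
#   url = https://galaxy.ansible.com/
#   token = <redacted>
#
# Every key in SERVER_DEF is resolved per server section, with SERVER_ADDITIONAL
# supplying defaults such as timeout=60 and validate_certs=True.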
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
This decorator constructs a ConcreteArtifactsManager and maintains
the related temporary directory auto-cleanup around the target
method invocation.
"""
def method_wrapper(*args, **kwargs):
if 'artifacts_manager' in kwargs:
return wrapped_method(*args, **kwargs)
artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['validate_certs']}
keyring = context.CLIARGS.get('keyring', None)
if keyring is not None:
artifacts_manager_kwargs.update({
'keyring': GalaxyCLI._resolve_path(keyring),
'required_signature_count': context.CLIARGS.get('required_valid_signature_count', None),
'ignore_signature_errors': context.CLIARGS.get('ignore_gpg_errors', None),
})
with ConcreteArtifactsManager.under_tmpdir(
C.DEFAULT_LOCAL_TMP,
**artifacts_manager_kwargs
) as concrete_artifact_cm:
kwargs['artifacts_manager'] = concrete_artifact_cm
return wrapped_method(*args, **kwargs)
return method_wrapper
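# Illustrative use of the decorator above: the wrapped CLI method receives a
# ready-to-use manager, and its temporary directory is cleaned up on exit.
#
#   @with_collection_artifacts_manager
#   def execute_download(self, artifacts_manager=None):
#       ...  # artifacts_manager is injected unless the caller passed one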
def _display_header(path, h1, h2, w1=10, w2=7):
display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
path,
h1,
h2,
'-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
'-' * max([len(h2), w2]),
cwidth=w1,
vwidth=w2,
))
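# For example, _display_header('/tmp/collections', 'Collection', 'Version')
# renders:
#
#   # /tmp/collections
#   Collection Version
#   ---------- -------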
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
fqcn=to_text(collection.fqcn),
version=collection.ver,
cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
vwidth=max(vwidth, min_vwidth)
))
def _get_collection_widths(collections):
if not is_iterable(collections):
collections = (collections, )
fqcn_set = {to_text(c.fqcn) for c in collections}
version_set = {to_text(c.ver) for c in collections}
fqcn_length = len(max(fqcn_set, key=len))
version_length = len(max(version_set, key=len))
return fqcn_length, version_length
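# Example: for collections rendering as community.general:0.1.1 and
# amazon.aws:0.1.0, this returns (17, 5), the lengths of the longest FQCN and
# the longest version, which the list output code feeds into _display_header
# and _display_collection to keep the columns aligned.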
def validate_signature_count(value):
match = re.match(SIGNATURE_COUNT_RE, value)
if match is None:
raise ValueError(f"{value} is not a valid signature count value")
return value
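# Per the --required-valid-signature-count help text below, accepted forms
# look like '1', '+2', 'all' and '+all' (a leading '+' meaning signature
# verification must not come up empty); anything else, e.g. '-1', makes this
# validator raise ValueError.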
class GalaxyCLI(CLI):
'''command to manage Ansible roles and collections in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
name = 'ansible-galaxy'
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
self._raw_args = args
self._implicit_role = False
if len(args) > 1:
# Inject role into sys.argv[1] as a backwards compatibility step
if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
args.insert(1, 'role')
self._implicit_role = True
# since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
if args[1:3] == ['role', 'login']:
display.error(
"The login command was removed in late 2020. An API key is now required to publish roles or collections "
"to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
"ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
"command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
sys.exit(1)
self.api_servers = []
self.galaxy = None
self._api = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--token', '--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', help='Ignore SSL certificate validation errors.', default=None)
common.add_argument('--timeout', dest='timeout', type=int,
help="The time to wait for operations against the galaxy server, defaults to 60s.")
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
collections_path = opt_help.argparse.ArgumentParser(add_help=False)
collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
default=AnsibleCollectionConfig.collection_paths,
action=opt_help.PrependListAction,
help="One or more directories to search for collections in addition "
"to the default COLLECTIONS_PATHS. Separate multiple paths "
"with '{0}'.".format(os.path.pathsep))
cache_options = opt_help.argparse.ArgumentParser(add_help=False)
cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
default=False, help='Clear the existing server response cache.')
cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
help='Do not use the server response cache.')
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_download_options(collection_parser, parents=[common, cache_options])
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force, cache_options])
self.add_list_options(collection_parser, parents=[common, collections_path])
self.add_verify_options(collection_parser, parents=[common, collections_path])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_download_options(self, parser, parents=None):
download_parser = parser.add_parser('download', parents=parents,
help='Download collections and their dependencies as a tarball for an '
'offline install.')
download_parser.set_defaults(func=self.execute_download)
download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collection(s) listed as dependencies.")
download_parser.add_argument('-p', '--download-path', dest='download_path',
default='./collections',
help='The directory to download the collections to.')
download_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be downloaded.')
download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
galaxy_type = 'role'
if parser.metavar == 'COLLECTION_ACTION':
galaxy_type = 'collection'
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
if galaxy_type == 'collection':
list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
help="Format to display the list of collections in.")
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
galaxy_type = 'collection'
verify_parser = parser.add_parser('verify', parents=parents, help='Compare the checksums of the installed collection(s) '
'with the checksums of the collection(s) found on the server. This does not verify dependencies.')
verify_parser.set_defaults(func=self.execute_verify)
verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
'This is mutually exclusive with --requirements-file.')
verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during verification and continue with the next specified collection.')
verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
help='Validate collection integrity locally without contacting server for '
'canonical manifest hash.')
verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be verified.')
verify_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
verify_parser.add_argument('--signature', dest='signatures', action='append',
help='An additional signature source to verify the authenticity of the MANIFEST.json before using '
'it to verify the rest of the contents of a collection from a Galaxy server. Use in '
'conjunction with a positional collection name (mutually exclusive with --requirements-file).')
valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
'or all to signify that all signatures must be used to verify the collection. ' \
'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
'Provide this option multiple times to ignore a list of status codes. ' \
'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type))
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
'or all to signify that all signatures must be used to verify the collection. ' \
'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
'Provide this option multiple times to ignore a list of status codes. ' \
'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path',
default=self._get_default_collection_path(),
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
default=C.GALAXY_DISABLE_GPG_VERIFY,
help='Disable GPG signature verification when installing collections from a Galaxy server')
install_parser.add_argument('--signature', dest='signatures', action='append',
help='An additional signature source to verify the authenticity of the MANIFEST.json before '
'installing the collection from a Galaxy server. Use in conjunction with a positional '
'collection name (mutually exclusive with --requirements-file).')
install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
else:
install_parser.add_argument('-r', '--role-file', dest='requirements',
help='A file containing a list of roles to be installed.')
if self._implicit_role and ('-r' in self._raw_args or '--role-file' in self._raw_args):
# Any collections in the requirements files will also be installed
install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during collection signature verification')
install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
default=C.GALAXY_DISABLE_GPG_VERIFY,
help='Disable GPG signature verification when installing collections from a Galaxy server')
install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
help='The path in which the collection is built. The default is the current '
'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
# ensure we have 'usable' cli option
setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
def server_config_def(section, key, required, option_type):
config_def = {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
'type': option_type,
}
if key in SERVER_ADDITIONAL:
config_def.update(SERVER_ADDITIONAL[key])
return config_def
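# For example, server_config_def('release_galaxy', 'url', True, 'str')
# produces a definition resolvable from the 'url' key of the
# [galaxy_server.release_galaxy] ini section or from the
# ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_URL environment variable.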
galaxy_options = {}
for optional_key in ['clear_response_cache', 'no_cache', 'timeout']:
if optional_key in context.CLIARGS:
galaxy_options[optional_key] = context.CLIARGS[optional_key]
config_servers = []
# Need to filter out empty strings or non-truthy values as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
for server_priority, server_key in enumerate(server_list, start=1):
# Abuse the 'plugin config' by making 'galaxy_server' a type of plugin
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF)
defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
# resolve the config created options above with existing config and user options
server_options = C.config.get_plugin_options('galaxy_server', server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as a kwarg to GalaxyAPI; the same goes for the others we pop here
auth_url = server_options.pop('auth_url')
client_id = server_options.pop('client_id')
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
v3 = server_options.pop('v3')
validate_certs = server_options['validate_certs']
if v3:
# This allows a user to explicitly indicate the server uses the /v3 API
# This was added for testing against pulp_ansible and I'm not sure it has
# a practical purpose outside of this use case. As such, this option is not
# documented as of now
server_options['available_api_versions'] = {'v3': '/v3'}
# default case if no auth info is provided.
server_options['token'] = None
if username:
server_options['token'] = BasicAuthToken(username, server_options['password'])
else:
if token_val:
if auth_url:
server_options['token'] = KeycloakToken(access_token=token_val,
auth_url=auth_url,
validate_certs=validate_certs,
client_id=client_id)
else:
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
server_options.update(galaxy_options)
config_servers.append(GalaxyAPI(
self.galaxy, server_key,
priority=server_priority,
**server_options
))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
# resolve validate_certs
v_config_default = True if C.GALAXY_IGNORE_CERTS is None else not C.GALAXY_IGNORE_CERTS
validate_certs = v_config_default if context.CLIARGS['validate_certs'] is None else context.CLIARGS['validate_certs']
if cmd_server:
# Cmd args take precedence over the config entry but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(
self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
priority=len(config_servers) + 1,
validate_certs=validate_certs,
**galaxy_options
))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(
self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
priority=0,
validate_certs=validate_certs,
**galaxy_options
))
return context.CLIARGS['func']()
@property
def api(self):
if self._api:
return self._api
for server in self.api_servers:
try:
if u'v1' in server.available_api_versions:
self._api = server
break
except Exception:
continue
if not self._api:
self._api = self.api_servers[0]
return self._api
def _get_default_collection_path(self):
return C.COLLECTIONS_PATHS[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None, validate_signature_options=True):
"""
Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are 2
requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be a Galaxy role name, or a URL to an SCM repo or tarball.
name: Downloads the role to the specified name; defaults to the name from Galaxy, or the name of the repo if src is a URL.
scm: If src is a URL, specify the SCM. Only git and hg are supported, and it defaults to git.
version: The version of the role to download. Can also be a tag, commit, or branch name, and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
type: git|file|url|galaxy
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
:param artifacts_manager: Artifacts manager.
:return: a dict containing the roles and/or collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if file_requirements is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, self.api, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, self.api, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
else:
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles') or []:
requirements['roles'] += parse_role_req(role_req)
requirements['collections'] = [
Requirement.from_requirement_dict(
self._init_coll_req_dict(collection_req),
artifacts_manager,
validate_signature_options,
)
for collection_req in file_requirements.get('collections') or []
]
return requirements
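# A concrete v2 requirements.yml accepted by the parser above (contents are
# illustrative):
#
#   collections:
#     - name: community.general
#       version: '>=3.0.0'
#     - name: https://github.com/ansible-collections/community.crypto.git
#       type: git
#       version: main
#   roles:
#     - src: geerlingguy.docker
#       version: 4.1.1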
def _init_coll_req_dict(self, coll_req):
if not isinstance(coll_req, dict):
# Assume it's a string:
return {'name': coll_req}
if (
'name' not in coll_req or
not coll_req.get('source') or
coll_req.get('type', 'galaxy') != 'galaxy'
):
return coll_req
# Try and match up the requirement source with our list of Galaxy API
# servers defined in the config, otherwise create a server with that
# URL without any auth.
coll_req['source'] = next(
iter(
srvr for srvr in self.api_servers
if coll_req['source'] in {srvr.name, srvr.api_server}
),
GalaxyAPI(
self.galaxy,
'explicit_requirement_{name!s}'.format(
name=coll_req['name'],
),
coll_req['source'],
validate_certs=not context.CLIARGS['ignore_certs'],
),
)
return coll_req
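# Example: a bare string requirement such as 'community.general' is normalized
# to {'name': 'community.general'}, while a dict carrying source: release_galaxy
# is matched against self.api_servers by name or URL before falling back to a
# new unauthenticated GalaxyAPI for that source.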
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
# Get the top-level 'description' first, falling back to role_info['galaxy_info']['description'].
galaxy_info = role_info.get('galaxy_info', {})
description = role_info.get('description', galaxy_info.get('description', ''))
text.append(u"\tdescription: %s" % description)
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
# make sure we have a trailing newline returned
text.append(u"")
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
loader = DataLoader()
templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
templar.environment.filters['comment_ify'] = comment_ify
meta_value = templar.template(meta_template)
return meta_value
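# Example of the comment_ify filter defined above:
#   comment_ify("See L(the docs, https://docs.ansible.com) and C(version)")
# returns roughly:
#   # See the docs <https://docs.ansible.com> and 'version'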
def _require_one_of_collections_requirements(
self, collections, requirements_file,
signatures=None,
artifacts_manager=None,
):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
if signatures is not None:
raise AnsibleError(
"The --signatures option and --requirements-file are mutually exclusive. "
"Use the --signatures with positional collection_name args or provide a "
"'signatures' key for requirements in the --requirements-file."
)
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(
requirements_file,
allow_old_format=False,
artifacts_manager=artifacts_manager,
)
else:
requirements = {
'collections': [
Requirement.from_string(coll_input, artifacts_manager, signatures)
for coll_input in collections
],
'roles': [],
}
return requirements
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(
to_text(collection_path, errors='surrogate_or_strict'),
to_text(output_path, errors='surrogate_or_strict'),
force,
)
@with_collection_artifacts_manager
def execute_download(self, artifacts_manager=None):
collections = context.CLIARGS['args']
no_deps = context.CLIARGS['no_deps']
download_path = context.CLIARGS['download_path']
requirements_file = context.CLIARGS['requirements']
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
artifacts_manager=artifacts_manager,
)['collections']
download_path = GalaxyCLI._resolve_path(download_path)
b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
if not os.path.exists(b_download_path):
os.makedirs(b_download_path)
download_collections(
requirements, download_path, self.api_servers, no_deps,
context.CLIARGS['allow_pre_release'],
artifacts_manager=artifacts_manager,
)
return 0
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
dependencies=[],
))
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <[email protected]>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
build_ignore=[],
))
skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
# delete the contents rather than the collection root in case init was run from the root (--init-path ../../)
for root, dirs, files in os.walk(b_obj_path, topdown=True):
for old_dir in dirs:
path = os.path.join(root, old_dir)
shutil.rmtree(path)
for old_file in files:
path = os.path.join(root, old_file)
os.unlink(path)
if obj_skeleton is not None:
own_skeleton = False
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
loader = DataLoader()
templar = Templar(loader, variables=inject_data)
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
# Filter out ignored directory names
# Use [:] to mutate the list os.walk uses
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
with open(dest_file, 'wb') as df:
df.write(b_rendered)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.api, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
if not context.CLIARGS['offline']:
remote_data = None
try:
remote_data = self.api.lookup_role_by_name(role, False)
except AnsibleError as e:
if e.http_code == 400 and 'Bad Request' in e.message:
# Role does not exist in Ansible Galaxy
data = u"- the role %s was not found" % role
break
raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
if remote_data:
role_info.update(remote_data)
elif context.CLIARGS['offline'] and not gr._exists:
data = u"- the role %s was not found" % role
break
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
collections = context.CLIARGS['args']
search_paths = context.CLIARGS['collections_path']
ignore_errors = context.CLIARGS['ignore_errors']
local_verify_only = context.CLIARGS['offline']
requirements_file = context.CLIARGS['requirements']
signatures = context.CLIARGS['signatures']
if signatures is not None:
signatures = list(signatures)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
signatures=signatures,
artifacts_manager=artifacts_manager,
)['collections']
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
results = verify_collections(
requirements, resolved_paths,
self.api_servers, ignore_errors,
local_verify_only=local_verify_only,
artifacts_manager=artifacts_manager,
)
if any(result for result in results if not result.success):
return 1
return 0
@with_collection_artifacts_manager
def execute_install(self, artifacts_manager=None):
"""
Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
:param artifacts_manager: Artifacts manager.
"""
install_items = context.CLIARGS['args']
requirements_file = context.CLIARGS['requirements']
collection_path = None
signatures = context.CLIARGS.get('signatures')
if signatures is not None:
signatures = list(signatures)
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
"run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
"'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
# TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
collection_requirements = []
role_requirements = []
if context.CLIARGS['type'] == 'collection':
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
requirements = self._require_one_of_collections_requirements(
install_items, requirements_file,
signatures=signatures,
artifacts_manager=artifacts_manager,
)
collection_requirements = requirements['collections']
if requirements['roles']:
display.vvv(two_type_warning.format('role'))
else:
if not install_items and requirements_file is None:
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
if requirements_file:
if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
galaxy_args = self._raw_args
will_install_collections = self._implicit_role and '-p' not in galaxy_args and '--roles-path' not in galaxy_args
requirements = self._parse_requirements_file(
requirements_file,
artifacts_manager=artifacts_manager,
validate_signature_options=will_install_collections,
)
role_requirements = requirements['roles']
# We can only install collections and roles at the same time if the type wasn't specified and the -p
# argument was not used. If collections are present in the requirements then at least display a msg.
if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
'--roles-path' in galaxy_args):
# We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
# was explicit about the type and shouldn't care that collections were skipped.
display_func = display.warning if self._implicit_role else display.vvv
display_func(two_type_warning.format('collection'))
else:
collection_path = self._get_default_collection_path()
collection_requirements = requirements['collections']
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
role_requirements.append(GalaxyRole(self.galaxy, self.api, **role))
if not role_requirements and not collection_requirements:
display.display("Skipping install, no requirements found")
return
if role_requirements:
display.display("Starting galaxy role install process")
self._execute_install_role(role_requirements)
if collection_requirements:
display.display("Starting galaxy collection install process")
# Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
# the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
self._execute_install_collection(
collection_requirements, collection_path,
artifacts_manager=artifacts_manager,
)
def _execute_install_collection(
self, requirements, path, artifacts_manager,
):
force = context.CLIARGS['force']
ignore_errors = context.CLIARGS['ignore_errors']
no_deps = context.CLIARGS['no_deps']
force_with_deps = context.CLIARGS['force_with_deps']
disable_gpg_verify = context.CLIARGS['disable_gpg_verify']
# If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
upgrade = context.CLIARGS.get('upgrade', False)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection will not be picked up in an Ansible "
"run, unless within a playbook-adjacent collections directory." % (to_text(path), to_text(":".join(collections_path))))
output_path = validate_collection_path(path)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(
requirements, output_path, self.api_servers, ignore_errors,
no_deps, force, force_with_deps, upgrade,
allow_pre_release=allow_pre_release,
artifacts_manager=artifacts_manager,
disable_gpg_verify=disable_gpg_verify,
)
return 0
def _execute_install_role(self, requirements):
role_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
for role in requirements:
# only process roles in roles files when names matches if given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
# NOTE: the meta file is also required for installing the role, not just dependencies
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata_dependencies + role.requirements
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in requirements:
display.display('- adding dependency: %s' % to_text(dep_role))
requirements.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
requirements.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
requirements.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
@with_collection_artifacts_manager
def execute_list_collection(self, artifacts_manager=None):
"""
List all collections installed on the local system
:param artifacts_manager: Artifacts manager.
"""
output_format = context.CLIARGS['output_format']
collections_search_paths = set(context.CLIARGS['collections_path'])
collection_name = context.CLIARGS['collection']
default_collections_path = AnsibleCollectionConfig.collection_paths
collections_in_paths = {}
warnings = []
path_found = False
collection_found = False
for path in collections_search_paths:
collection_path = GalaxyCLI._resolve_path(path)
if not os.path.exists(path):
if path in default_collections_path:
# don't warn for missing default paths
continue
warnings.append("- the configured path {0} does not exist.".format(collection_path))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
path_found = True
if collection_name:
# list a specific collection
validate_collection_name(collection_name)
namespace, collection = collection_name.split('.')
collection_path = validate_collection_path(collection_path)
b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
warnings.append("- unable to find {0} in collection paths".format(collection_name))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
collection_found = True
try:
collection = Requirement.from_dir_path_as_unknown(
b_collection_path,
artifacts_manager,
)
except ValueError as val_err:
six.raise_from(AnsibleError(val_err), val_err)
if output_format in {'yaml', 'json'}:
collections_in_paths[collection_path] = {
collection.fqcn: {'version': collection.ver}
}
continue
fqcn_width, version_width = _get_collection_widths([collection])
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
_display_collection(collection, fqcn_width, version_width)
else:
# list all collections
collection_path = validate_collection_path(path)
if os.path.isdir(collection_path):
display.vvv("Searching {0} for collections".format(collection_path))
collections = list(find_existing_collections(
collection_path, artifacts_manager,
))
else:
# There was no 'ansible_collections/' directory in the path, so there
# are no collections here.
display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
continue
if not collections:
display.vvv("No collections found at {0}".format(collection_path))
continue
if output_format in {'yaml', 'json'}:
collections_in_paths[collection_path] = {
collection.fqcn: {'version': collection.ver} for collection in collections
}
continue
# Display header
fqcn_width, version_width = _get_collection_widths(collections)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
# Sort collections by the namespace and name
for collection in sorted(collections, key=to_text):
_display_collection(collection, fqcn_width, version_width)
# Do not warn if the specific collection was found in any of the search paths
if collection_found and collection_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
if output_format == 'json':
display.display(json.dumps(collections_in_paths))
elif output_format == 'yaml':
display.display(yaml_dump(collections_in_paths))
return 0
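# Hedged CLI sketch for the list logic above. The machine-readable outputs
# handled here are typically selected with a format flag (flag name assumed):
#   ansible-galaxy collection list --format json -p ./collections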
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
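# Hedged CLI sketch for the publish path above (the tarball name is an example):
#   ansible-galaxy collection publish ./my_namespace-my_collection-1.0.0.tar.gz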
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
def main(args=None):
GalaxyCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 70,180 |
Fatal error in ansible-galaxy collection list due to common SCM collection practice
|
##### SUMMARY
The command `ansible-galaxy collection list` should not _error_ when parsing a particular collection, because this prevents displaying a list of _all other_ collections.
This happens when simply using the source checkout of `community.vmware`.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
lib/ansible/cli/galaxy.py
##### ANSIBLE VERSION
```paste below
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
```
86606d3ca707e8836d2b956694bb55d3ae282aee
##### CONFIGURATION
```paste below
ansible-config dump --only-changed
```
(collection path modification in steps)
##### OS / ENVIRONMENT
N/A
##### STEPS TO REPRODUCE
```
base_dir=awx/plugins/collections/ansible_collections
mkdir -p $base_dir/community
git clone https://github.com/ansible-collections/vmware.git $base_dir/community/vmware
```
on the revision of this collection `2b277d44fa664443be679c13a66bdf2c60a75093`
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
```
##### EXPECTED RESULTS
lists all the collections in the folder `awx/plugins/collections`, and gives `*` for the version of `community.vmware`, because I don't expect maintainers to necessarily include the version in their `galaxy.yml`.
In this particular case, the file is:
```yaml
---
namespace: community
name: vmware
# the version key is generated during the release by Zuul
# https://github.com/ansible-network/releases/tree/master/ansible_releases/cmd
# A script based on https://pypi.org/project/pbr/ will generate the version
# key. The version value depends on the tag or the last git tag.
readme: README.md
authors:
- Ansible (https://github.com/ansible)
description:
license_file: LICENSE
tags:
- cloud
- vmware
- virtualization
dependencies: {}
```
Of course they're doing this; they're using tags and CI in their release process. As an example of a collection with _no_ `galaxy.yml`:
```
# /Users/alancoding/.ansible/collections/ansible_collections
Collection Version
---------------------- -----------
chrismeyersfsu.hax0r *
```
It should show the same thing for the vmware collection source checkout
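For illustration (assuming the checkout from the reproduction steps), the expected listing would look roughly like:
```
# awx/plugins/collections/ansible_collections
Collection       Version
---------------- -------
community.vmware *
```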
##### ACTUAL RESULTS
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible-galaxy 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible-galaxy
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
No config file found; using defaults
[DEPRECATION WARNING]: ANSIBLE_COLLECTIONS_PATHS option, all PATH-type options are singular PATH , use the "ANSIBLE_COLLECTIONS_PATH" environment variable instead. This feature
will be removed in version 2.14. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.
Searching /Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections for collections
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.1.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.3.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.tox' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/build_artifact/openstack-cloud-0.0.1-dev82.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.2.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/galaxy.yml' for collection build
Found installed collection alancoding.cloud:0.0.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/galaxy.yml' for collection build
Found installed collection amazon.aws:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/galaxy.yml' for collection build
Found installed collection azure.azcollection:0.1.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/galaxy.yml' for collection build
Found installed collection google.cloud:0.0.9 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/galaxy.yml' for collection build
Found installed collection ansible.posix:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/galaxy.yml' for collection build
Found installed collection ansible.amazon:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/galaxy.yml' for collection build
Found installed collection theforeman.foreman:0.7.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/galaxy.yml' for collection build
Found installed collection community.general:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general'
ERROR! The collection galaxy.yml at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/vmware/galaxy.yml' is missing the following mandatory keys: version
```
It's bad that the command terminates so fragilely. This also just doesn't jibe with the common, published policies of first-class collections.
It's also bad that it won't give a traceback for the error that happened.
FYI to @Akasurde (I'd expect no change in vmware collection) @jborean93 (most likely expect change in CLI error handling)
|
https://github.com/ansible/ansible/issues/70180
|
https://github.com/ansible/ansible/pull/76596
|
9b79d6ba3582bb0ef339738b7e1ade879d00dfe3
|
05608b20e8f875d51866a184f8c579fe60498e05
| 2020-06-19T19:09:14Z |
python
| 2022-08-02T15:46:47Z |
lib/ansible/galaxy/collection/concrete_artifact_manager.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Concrete collection candidate management helper module."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import tarfile
import subprocess
import typing as t
from contextlib import contextmanager
from hashlib import sha256
from urllib.error import URLError
from urllib.parse import urldefrag
from shutil import rmtree
from tempfile import mkdtemp
if t.TYPE_CHECKING:
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate, Requirement,
)
from ansible.galaxy.token import GalaxyToken
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.dependency_resolution.dataclasses import _GALAXY_YAML
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.six import raise_from
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
import yaml
display = Display()
MANIFEST_FILENAME = 'MANIFEST.json'
class ConcreteArtifactsManager:
"""Manager for on-disk collection artifacts.
It is responsible for:
* downloading remote collections from Galaxy-compatible servers and
direct links to tarballs or SCM repositories
* keeping track of local ones
* keeping track of Galaxy API tokens for downloads from Galaxy'ish servers,
as well as the artifact hashes
* keeping track of Galaxy API signatures for downloads from Galaxy'ish servers
* caching all of above
* retrieving the metadata out of the downloaded artifacts
"""
def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, ignore_signature_errors=None):
# type: (bytes, bool, str, int, str, list[str]) -> None
"""Initialize ConcreteArtifactsManager caches and costraints."""
self._validate_certs = validate_certs # type: bool
self._artifact_cache = {} # type: dict[bytes, bytes]
self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes]
self._artifact_meta_cache = {} # type: dict[bytes, dict[str, str | list[str] | dict[str, str] | None]]
self._galaxy_collection_cache = {} # type: dict[Candidate | Requirement, tuple[str, str, GalaxyToken]]
self._galaxy_collection_origin_cache = {} # type: dict[Candidate, tuple[str, list[dict[str, str]]]]
self._b_working_directory = b_working_directory # type: bytes
self._supplemental_signature_cache = {} # type: dict[str, str]
self._keyring = keyring # type: str
self.timeout = timeout # type: int
self._required_signature_count = required_signature_count # type: str
self._ignore_signature_errors = ignore_signature_errors # type: list[str]
@property
def keyring(self):
return self._keyring
@property
def required_successful_signature_count(self):
return self._required_signature_count
@property
def ignore_signature_errors(self):
if self._ignore_signature_errors is None:
return []
return self._ignore_signature_errors
def get_galaxy_artifact_source_info(self, collection):
# type: (Candidate) -> dict[str, t.Union[str, list[dict[str, str]]]]
server = collection.src.api_server
try:
download_url = self._galaxy_collection_cache[collection][0]
signatures_url, signatures = self._galaxy_collection_origin_cache[collection]
except KeyError as key_err:
raise RuntimeError(
'There is no known source for {coll!s}'.
format(coll=collection),
) from key_err
return {
"format_version": "1.0.0",
"namespace": collection.namespace,
"name": collection.name,
"version": collection.ver,
"server": server,
"version_url": signatures_url,
"download_url": download_url,
"signatures": signatures,
}
def get_galaxy_artifact_path(self, collection):
# type: (t.Union[Candidate, Requirement]) -> bytes
"""Given a Galaxy-stored collection, return a cached path.
If it's not yet on disk, this method downloads the artifact first.
"""
try:
return self._galaxy_artifact_cache[collection]
except KeyError:
pass
try:
url, sha256_hash, token = self._galaxy_collection_cache[collection]
except KeyError as key_err:
raise_from(
RuntimeError(
'There is no known source for {coll!s}'.
format(coll=collection),
),
key_err,
)
display.vvvv(
"Fetching a collection tarball for '{collection!s}' from "
'Ansible Galaxy'.format(collection=collection),
)
try:
b_artifact_path = _download_file(
url,
self._b_working_directory,
expected_hash=sha256_hash,
validate_certs=self._validate_certs,
token=token,
) # type: bytes
except URLError as err:
raise_from(
AnsibleError(
'Failed to download collection tar '
"from '{coll_src!s}': {download_err!s}".
format(
coll_src=to_native(collection.src),
download_err=to_native(err),
),
),
err,
)
else:
display.vvv(
"Collection '{coll!s}' obtained from "
'server {server!s} {url!s}'.format(
coll=collection, server=collection.src or 'Galaxy',
url=collection.src.api_server if collection.src is not None
else '',
)
)
self._galaxy_artifact_cache[collection] = b_artifact_path
return b_artifact_path
def get_artifact_path(self, collection):
# type: (t.Union[Candidate, Requirement]) -> bytes
"""Given a concrete collection pointer, return a cached path.
If it's not yet on disk, this method downloads the artifact first.
"""
try:
return self._artifact_cache[collection.src]
except KeyError:
pass
# NOTE: SCM needs to be special-cased as it may contain either
# NOTE: one collection in its root, or a number of top-level
# NOTE: collection directories instead.
# NOTE: The idea is to store the SCM collection as unpacked
# NOTE: directory structure under the temporary location and use
# NOTE: a "virtual" collection that has pinned requirements on
# NOTE: the directories under that SCM checkout that correspond
# NOTE: to collections.
# NOTE: This brings us to the idea that we need two separate
# NOTE: virtual Requirement/Candidate types --
# NOTE: (single) dir + (multidir) subdirs
if collection.is_url:
display.vvvv(
"Collection requirement '{collection!s}' is a URL "
'to a tar artifact'.format(collection=collection.fqcn),
)
try:
b_artifact_path = _download_file(
collection.src,
self._b_working_directory,
expected_hash=None, # NOTE: URLs don't support checksums
validate_certs=self._validate_certs,
timeout=self.timeout
)
except Exception as err:
raise_from(
AnsibleError(
'Failed to download collection tar '
"from '{coll_src!s}': {download_err!s}".
format(
coll_src=to_native(collection.src),
download_err=to_native(err),
),
),
err,
)
elif collection.is_scm:
b_artifact_path = _extract_collection_from_git(
collection.src,
collection.ver,
self._b_working_directory,
)
elif collection.is_file or collection.is_dir or collection.is_subdirs:
b_artifact_path = to_bytes(collection.src)
else:
# NOTE: This may happen `if collection.is_online_index_pointer`
raise RuntimeError(
'The artifact is of an unexpected type {art_type!s}'.
format(art_type=collection.type)
)
self._artifact_cache[collection.src] = b_artifact_path
return b_artifact_path
def _get_direct_collection_namespace(self, collection):
# type: (Candidate) -> t.Optional[str]
return self.get_direct_collection_meta(collection)['namespace'] # type: ignore[return-value]
def _get_direct_collection_name(self, collection):
# type: (Candidate) -> t.Optional[str]
return self.get_direct_collection_meta(collection)['name'] # type: ignore[return-value]
def get_direct_collection_fqcn(self, collection):
# type: (Candidate) -> t.Optional[str]
"""Extract FQCN from the given on-disk collection artifact.
If the collection is virtual, ``None`` is returned instead
of a string.
"""
if collection.is_virtual:
# NOTE: should it be something like "<virtual>"?
return None
return '.'.join(( # type: ignore[type-var]
self._get_direct_collection_namespace(collection), # type: ignore[arg-type]
self._get_direct_collection_name(collection),
))
def get_direct_collection_version(self, collection):
# type: (t.Union[Candidate, Requirement]) -> str
"""Extract version from the given on-disk collection artifact."""
return self.get_direct_collection_meta(collection)['version'] # type: ignore[return-value]
def get_direct_collection_dependencies(self, collection):
# type: (t.Union[Candidate, Requirement]) -> dict[str, str]
"""Extract deps from the given on-disk collection artifact."""
collection_dependencies = self.get_direct_collection_meta(collection)['dependencies']
if collection_dependencies is None:
collection_dependencies = {}
return collection_dependencies # type: ignore[return-value]
def get_direct_collection_meta(self, collection):
# type: (t.Union[Candidate, Requirement]) -> dict[str, t.Union[str, dict[str, str], list[str], None]]
"""Extract meta from the given on-disk collection artifact."""
try: # FIXME: use unique collection identifier as a cache key?
return self._artifact_meta_cache[collection.src]
except KeyError:
b_artifact_path = self.get_artifact_path(collection)
if collection.is_url or collection.is_file:
collection_meta = _get_meta_from_tar(b_artifact_path)
elif collection.is_dir: # should we just build a coll instead?
# FIXME: what if there's subdirs?
try:
collection_meta = _get_meta_from_dir(b_artifact_path)
except LookupError as lookup_err:
raise_from(
AnsibleError(
'Failed to find the collection dir deps: {err!s}'.
format(err=to_native(lookup_err)),
),
lookup_err,
)
elif collection.is_scm:
collection_meta = {
'name': None,
'namespace': None,
'dependencies': {to_native(b_artifact_path): '*'},
'version': '*',
}
elif collection.is_subdirs:
collection_meta = {
'name': None,
'namespace': None,
# NOTE: Dropping b_artifact_path since it's based on src anyway
'dependencies': dict.fromkeys(
map(to_native, collection.namespace_collection_paths),
'*',
),
'version': '*',
}
else:
raise RuntimeError
self._artifact_meta_cache[collection.src] = collection_meta
return collection_meta
def save_collection_source(self, collection, url, sha256_hash, token, signatures_url, signatures):
# type: (Candidate, str, str, GalaxyToken, str, list[dict[str, str]]) -> None
"""Store collection URL, SHA256 hash and Galaxy API token.
This is a hook that is supposed to be called before attempting to
download Galaxy-based collections with ``get_galaxy_artifact_path()``.
"""
self._galaxy_collection_cache[collection] = url, sha256_hash, token
self._galaxy_collection_origin_cache[collection] = signatures_url, signatures
@classmethod
@contextmanager
def under_tmpdir(
cls,
temp_dir_base, # type: str
validate_certs=True, # type: bool
keyring=None, # type: str
required_signature_count=None, # type: str
ignore_signature_errors=None, # type: list[str]
): # type: (...) -> t.Iterator[ConcreteArtifactsManager]
"""Custom ConcreteArtifactsManager constructor with temp dir.
This method returns a context manager that allocates and cleans
up a temporary directory for caching the collection artifacts
during the dependency resolution process.
"""
# NOTE: Can't use `with tempfile.TemporaryDirectory:`
# NOTE: because it's not in Python 2 stdlib.
temp_path = mkdtemp(
dir=to_bytes(temp_dir_base, errors='surrogate_or_strict'),
)
b_temp_path = to_bytes(temp_path, errors='surrogate_or_strict')
try:
yield cls(
b_temp_path,
validate_certs,
keyring=keyring,
required_signature_count=required_signature_count,
ignore_signature_errors=ignore_signature_errors
)
finally:
rmtree(b_temp_path)
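# Hedged usage sketch for the context manager above; `req` is a hypothetical
# Candidate/Requirement object:
#   with ConcreteArtifactsManager.under_tmpdir('/tmp') as artifacts_manager:
#       meta = artifacts_manager.get_direct_collection_meta(req)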
def parse_scm(collection, version):
"""Extract name, version, path and subdir out of the SCM pointer."""
if ',' in collection:
collection, version = collection.split(',', 1)
elif version == '*' or not version:
version = 'HEAD'
if collection.startswith('git+'):
path = collection[4:]
else:
path = collection
path, fragment = urldefrag(path)
fragment = fragment.strip(os.path.sep)
if path.endswith(os.path.sep + '.git'):
name = path.split(os.path.sep)[-2]
elif '://' not in path and '@' not in path:
name = path
else:
name = path.split('/')[-1]
if name.endswith('.git'):
name = name[:-4]
return name, version, path, fragment
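# Illustrative behaviour of parse_scm() (the URL is a made-up example):
#   parse_scm('git+https://example.com/org/repo.git#sub/dir,1.0.0', '*')
#   returns ('repo', '1.0.0', 'https://example.com/org/repo.git', 'sub/dir')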
def _extract_collection_from_git(repo_url, coll_ver, b_path):
name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
b_checkout_path = mkdtemp(
dir=b_path,
prefix=to_bytes(name, errors='surrogate_or_strict'),
) # type: bytes
try:
git_executable = get_bin_path('git')
except ValueError as err:
raise AnsibleError(
"Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.".
format(repo_url=to_native(git_url))
) from err
# Perform a shallow clone if simply cloning HEAD
if version == 'HEAD':
git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)
else:
git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path)
# FIXME: '--branch', version
try:
subprocess.check_call(git_clone_cmd)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError( # should probably be LookupError
'Failed to clone a Git repository from `{repo_url!s}`.'.
format(repo_url=to_native(git_url)),
),
proc_err,
)
git_switch_cmd = git_executable, 'checkout', to_text(version)
try:
subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
except subprocess.CalledProcessError as proc_err:
raise_from(
AnsibleError( # should probably be LookupError
'Failed to switch a cloned Git repo `{repo_url!s}` '
'to the requested revision `{commitish!s}`.'.
format(
commitish=to_native(version),
repo_url=to_native(git_url),
),
),
proc_err,
)
return (
os.path.join(b_checkout_path, to_bytes(fragment))
if fragment else b_checkout_path
)
# FIXME: use random subdirs while preserving the file names
def _download_file(url, b_path, expected_hash, validate_certs, token=None, timeout=60):
# type: (str, bytes, t.Optional[str], bool, GalaxyToken, int) -> bytes
# ^ NOTE: used in download and verify_collections ^
b_tarball_name = to_bytes(
url.rsplit('/', 1)[1], errors='surrogate_or_strict',
)
b_file_name = b_tarball_name[:-len('.tar.gz')]
b_tarball_dir = mkdtemp(
dir=b_path,
prefix=b'-'.join((b_file_name, b'')),
) # type: bytes
b_file_path = os.path.join(b_tarball_dir, b_tarball_name)
display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir)))
# NOTE: Galaxy redirects downloads to S3 which rejects the request
# NOTE: if an Authorization header is attached so don't redirect it
resp = open_url(
to_native(url, errors='surrogate_or_strict'),
validate_certs=validate_certs,
headers=None if token is None else token.headers(),
unredirected_headers=['Authorization'], http_agent=user_agent(),
timeout=timeout
)
with open(b_file_path, 'wb') as download_file: # type: t.BinaryIO
actual_hash = _consume_file(resp, write_to=download_file)
if expected_hash:
display.vvvv(
'Validating downloaded file hash {actual_hash!s} with '
'expected hash {expected_hash!s}'.
format(actual_hash=actual_hash, expected_hash=expected_hash)
)
if expected_hash != actual_hash:
raise AnsibleError('Mismatch artifact hash with downloaded file')
return b_file_path
def _consume_file(read_from, write_to=None):
# type: (t.BinaryIO, t.BinaryIO) -> str
bufsize = 65536
sha256_digest = sha256()
data = read_from.read(bufsize)
while data:
if write_to is not None:
write_to.write(data)
write_to.flush()
sha256_digest.update(data)
data = read_from.read(bufsize)
return sha256_digest.hexdigest()
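# Hedged example for the helper above (the path is illustrative): with no
# write_to target it only computes the digest of the stream:
#   with open('/tmp/artifact.tar.gz', 'rb') as src:
#       digest = _consume_file(src)  # hex-encoded SHA256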
def _normalize_galaxy_yml_manifest(
galaxy_yml, # type: dict[str, t.Union[str, list[str], dict[str, str], None]]
b_galaxy_yml_path, # type: bytes
):
# type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None]]
galaxy_yml_schema = (
get_collections_galaxy_meta_info()
) # type: list[dict[str, t.Any]] # FIXME: <--
# FIXME: 👆maybe precise type: list[dict[str, t.Union[bool, str, list[str]]]]
mandatory_keys = set()
string_keys = set() # type: set[str]
list_keys = set() # type: set[str]
dict_keys = set() # type: set[str]
for info in galaxy_yml_schema:
if info.get('required', False):
mandatory_keys.add(info['key'])
key_list_type = {
'str': string_keys,
'list': list_keys,
'dict': dict_keys,
}[info.get('type', 'str')]
key_list_type.add(info['key'])
all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))
set_keys = set(galaxy_yml.keys())
missing_keys = mandatory_keys.difference(set_keys)
if missing_keys:
raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
% (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))
extra_keys = set_keys.difference(all_keys)
if len(extra_keys) > 0:
display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
% (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
# Add the defaults if they have not been set
for optional_string in string_keys:
if optional_string not in galaxy_yml:
galaxy_yml[optional_string] = None
for optional_list in list_keys:
list_val = galaxy_yml.get(optional_list, None)
if list_val is None:
galaxy_yml[optional_list] = []
elif not isinstance(list_val, list):
galaxy_yml[optional_list] = [list_val] # type: ignore[list-item]
for optional_dict in dict_keys:
if optional_dict not in galaxy_yml:
galaxy_yml[optional_dict] = {}
# NOTE: `version: null` is only allowed for `galaxy.yml`
# NOTE: and not `MANIFEST.json`. The use-case for it is collections
# NOTE: that generate the version from Git before building a
# NOTE: distributable tarball artifact.
if not galaxy_yml.get('version'):
galaxy_yml['version'] = '*'
return galaxy_yml
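# Hedged illustration of the normalization above: a galaxy.yml that omits
# `version` (a common SCM practice, e.g. versions injected at release time)
# comes back with version '*', optional string keys set to None, and list/dict
# keys defaulted, so such checkouts can be listed instead of raising an error.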
def _get_meta_from_dir(
b_path, # type: bytes
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None]]
try:
return _get_meta_from_installed_dir(b_path)
except LookupError:
return _get_meta_from_src_dir(b_path)
def _get_meta_from_src_dir(
b_path, # type: bytes
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None]]
galaxy_yml = os.path.join(b_path, _GALAXY_YAML)
if not os.path.isfile(galaxy_yml):
raise LookupError(
"The collection galaxy.yml path '{path!s}' does not exist.".
format(path=to_native(galaxy_yml))
)
with open(galaxy_yml, 'rb') as manifest_file_obj:
try:
manifest = yaml_load(manifest_file_obj)
except yaml.error.YAMLError as yaml_err:
raise_from(
AnsibleError(
"Failed to parse the galaxy.yml at '{path!s}' with "
'the following error:\n{err_txt!s}'.
format(
path=to_native(galaxy_yml),
err_txt=to_native(yaml_err),
),
),
yaml_err,
)
return _normalize_galaxy_yml_manifest(manifest, galaxy_yml)
def _get_json_from_installed_dir(
b_path, # type: bytes
filename, # type: str
): # type: (...) -> dict
b_json_filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
try:
with open(b_json_filepath, 'rb') as manifest_fd:
b_json_text = manifest_fd.read()
except (IOError, OSError):
raise LookupError(
"The collection {manifest!s} path '{path!s}' does not exist.".
format(
manifest=filename,
path=to_native(b_json_filepath),
)
)
manifest_txt = to_text(b_json_text, errors='surrogate_or_strict')
try:
manifest = json.loads(manifest_txt)
except ValueError:
raise AnsibleError(
'Collection tar file member {member!s} does not '
'contain a valid json string.'.
format(member=filename),
)
return manifest
def _get_meta_from_installed_dir(
b_path, # type: bytes
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None]]
manifest = _get_json_from_installed_dir(b_path, MANIFEST_FILENAME)
collection_info = manifest['collection_info']
version = collection_info.get('version')
if not version:
raise AnsibleError(
u'Collection metadata file `{manifest_filename!s}` at `{meta_file!s}` is expected '
u'to have a valid SemVer version value but got {version!s}'.
format(
manifest_filename=MANIFEST_FILENAME,
meta_file=to_text(b_path),
version=to_text(repr(version)),
),
)
return collection_info
def _get_meta_from_tar(
b_path, # type: bytes
): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None]]
if not tarfile.is_tarfile(b_path):
raise AnsibleError(
"Collection artifact at '{path!s}' is not a valid tar file.".
format(path=to_native(b_path)),
)
with tarfile.open(b_path, mode='r') as collection_tar: # type: tarfile.TarFile
try:
member = collection_tar.getmember(MANIFEST_FILENAME)
except KeyError:
raise AnsibleError(
"Collection at '{path!s}' does not contain the "
'required file {manifest_file!s}.'.
format(
path=to_native(b_path),
manifest_file=MANIFEST_FILENAME,
),
)
with _tarfile_extract(collection_tar, member) as (_member, member_obj):
if member_obj is None:
raise AnsibleError(
'Collection tar file does not contain '
'member {member!s}'.format(member=MANIFEST_FILENAME),
)
text_content = to_text(
member_obj.read(),
errors='surrogate_or_strict',
)
try:
manifest = json.loads(text_content)
except ValueError:
raise AnsibleError(
'Collection tar file member {member!s} does not '
'contain a valid json string.'.
format(member=MANIFEST_FILENAME),
)
return manifest['collection_info']
@contextmanager
def _tarfile_extract(
tar, # type: tarfile.TarFile
member, # type: tarfile.TarInfo
):
# type: (...) -> t.Iterator[tuple[tarfile.TarInfo, t.Optional[t.IO[bytes]]]]
tar_obj = tar.extractfile(member)
try:
yield member, tar_obj
finally:
if tar_obj is not None:
tar_obj.close()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 70,180 |
Fatal error in ansible-galaxy collection list due to common SCM collection practice
|
##### SUMMARY
The command `ansible-galaxy collection list` should not _error_ when parsing a particular collection, because this prevents displaying a list of _all other_ collections.
This happens when simply using the source checkout of `community.vmware`.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
lib/ansible/cli/galaxy.py
##### ANSIBLE VERSION
```paste below
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
```
86606d3ca707e8836d2b956694bb55d3ae282aee
##### CONFIGURATION
```paste below
ansible-config dump --only-changed
```
(collection path modification in steps)
##### OS / ENVIRONMENT
N/A
##### STEPS TO REPRODUCE
```
base_dir=awx/plugins/collections/ansible_collections
mkdir -p $base_dir/community
git clone https://github.com/ansible-collections/vmware.git $base_dir/community/vmware
```
on the revision of this collection `2b277d44fa664443be679c13a66bdf2c60a75093`
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
```
##### EXPECTED RESULTS
It should list all the collections in the folder `awx/plugins/collections`, and show `*` as the version of `community.vmware`, since maintainers do not necessarily include the version in their `galaxy.yml`.
In this particular case, the file is:
```yaml
---
namespace: community
name: vmware
# the version key is generated during the release by Zuul
# https://github.com/ansible-network/releases/tree/master/ansible_releases/cmd
# A script based on https://pypi.org/project/pbr/ will generate the version
# key. The version value depends on the tag or the last git tag.
readme: README.md
authors:
- Ansible (https://github.com/ansible)
description:
license_file: LICENSE
tags:
- cloud
- vmware
- virtualization
dependencies: {}
```
Of course they're doing this; they're using tags and CI in their release process. As an example, here is a collection with _no_ `galaxy.yml`:
```
# /Users/alancoding/.ansible/collections/ansible_collections
Collection Version
---------------------- -----------
chrismeyersfsu.hax0r *
```
It should show the same thing for the vmware collection source checkout
##### ACTUAL RESULTS
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible-galaxy 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible-galaxy
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
No config file found; using defaults
[DEPRECATION WARNING]: ANSIBLE_COLLECTIONS_PATHS option, all PATH-type options are singular PATH , use the "ANSIBLE_COLLECTIONS_PATH" environment variable instead. This feature
will be removed in version 2.14. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.
Searching /Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections for collections
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.1.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.3.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.tox' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/build_artifact/openstack-cloud-0.0.1-dev82.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.2.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/galaxy.yml' for collection build
Found installed collection alancoding.cloud:0.0.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/galaxy.yml' for collection build
Found installed collection amazon.aws:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/galaxy.yml' for collection build
Found installed collection azure.azcollection:0.1.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/galaxy.yml' for collection build
Found installed collection google.cloud:0.0.9 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/galaxy.yml' for collection build
Found installed collection ansible.posix:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/galaxy.yml' for collection build
Found installed collection ansible.amazon:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/galaxy.yml' for collection build
Found installed collection theforeman.foreman:0.7.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/galaxy.yml' for collection build
Found installed collection community.general:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general'
ERROR! The collection galaxy.yml at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/vmware/galaxy.yml' is missing the following mandatory keys: version
```
It's bad that the command terminates so fragilely. This also doesn't jibe with the common, published policies of first-class collections.
It's also bad that it won't give a traceback for the error that happened.
FYI to @Akasurde (I'd expect no change in vmware collection) @jborean93 (most likely expect change in CLI error handling)
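
A minimal sketch of the tolerant parsing being asked for (the helper name is hypothetical, not the actual `ansible-galaxy` code path):
```python
import yaml  # PyYAML, assumed available


def read_collection_version(galaxy_yml_path):
    """Return a collection's version, falling back to '*' when unset."""
    with open(galaxy_yml_path, 'rb') as f:
        meta = yaml.safe_load(f) or {}
    # A missing or null 'version' should not abort the whole listing;
    # show '*' instead, mirroring collections that have no galaxy.yml at all.
    return meta.get('version') or '*'
```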
|
https://github.com/ansible/ansible/issues/70180
|
https://github.com/ansible/ansible/pull/76596
|
9b79d6ba3582bb0ef339738b7e1ade879d00dfe3
|
05608b20e8f875d51866a184f8c579fe60498e05
| 2020-06-19T19:09:14Z |
python
| 2022-08-02T15:46:47Z |
test/integration/targets/ansible-galaxy-collection/tasks/list.yml
|
- name: initialize collection structure
command: ansible-galaxy collection init {{ item }} --init-path "{{ galaxy_dir }}/dev/ansible_collections" {{ galaxy_verbosity }}
loop:
- 'dev.collection1'
- 'dev.collection2'
- 'dev.collection3'
- 'dev.collection4'
- name: replace the default version of the collections
lineinfile:
path: "{{ galaxy_dir }}/dev/ansible_collections/dev/{{ item.name }}/galaxy.yml"
line: "{{ item.version }}"
regexp: "version: .*"
loop:
- name: "collection1"
version: "version: null"
- name: "collection2"
version: "version: placeholder"
- name: "collection3"
version: "version: ''"
- name: set the namespace, name, and version keys to None
lineinfile:
path: "{{ galaxy_dir }}/dev/ansible_collections/dev/collection4/galaxy.yml"
line: "{{ item.after }}"
regexp: "{{ item.before }}"
loop:
- before: "^namespace: dev"
after: "namespace:"
- before: "^name: collection4"
after: "name:"
- before: "^version: 1.0.0"
after: "version:"
- name: list collections in development without semver versions
command: ansible-galaxy collection list {{ galaxy_verbosity }}
register: list_result
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
- assert:
that:
- "'dev.collection1 *' in list_result.stdout"
# Note the version displayed is the 'placeholder' string rather than "*" since it is not falsy
- "'dev.collection2 placeholder' in list_result.stdout"
- "'dev.collection3 *' in list_result.stdout"
- "'dev.collection4 *' in list_result.stdout"
- name: list collections in human format
command: ansible-galaxy collection list --format human
register: list_result_human
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
- assert:
that:
- "'dev.collection1 *' in list_result_human.stdout"
# Note the version displayed is the 'placeholder' string rather than "*" since it is not falsy
- "'dev.collection2 placeholder' in list_result_human.stdout"
- "'dev.collection3 *' in list_result_human.stdout"
- name: list collections in yaml format
command: ansible-galaxy collection list --format yaml
register: list_result_yaml
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
- assert:
that:
- "item.value | length == 4"
- "item.value['dev.collection1'].version == '*'"
- "item.value['dev.collection2'].version == 'placeholder'"
- "item.value['dev.collection3'].version == '*'"
with_dict: "{{ list_result_yaml.stdout | from_yaml }}"
- name: list collections in json format
command: ansible-galaxy collection list --format json
register: list_result_json
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
- assert:
that:
- "item.value | length == 4"
- "item.value['dev.collection1'].version == '*'"
- "item.value['dev.collection2'].version == 'placeholder'"
- "item.value['dev.collection3'].version == '*'"
with_dict: "{{ list_result_json.stdout | from_json }}"
- name: list single collection in json format
command: "ansible-galaxy collection list {{ item.key }} --format json"
register: list_single_result_json
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
with_dict: "{{ { 'dev.collection1': '*', 'dev.collection2': 'placeholder', 'dev.collection3': '*' } }}"
- assert:
that:
- "(item.stdout | from_json)[galaxy_dir + '/dev/ansible_collections'][item.item.key].version == item.item.value"
with_items: "{{ list_single_result_json.results }}"
- name: list single collection in yaml format
command: "ansible-galaxy collection list {{ item.key }} --format yaml"
register: list_single_result_yaml
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
with_dict: "{{ { 'dev.collection1': '*', 'dev.collection2': 'placeholder', 'dev.collection3': '*' } }}"
- assert:
that:
- "(item.stdout | from_yaml)[galaxy_dir + '/dev/ansible_collections'][item.item.key].version == item.item.value"
  with_items: "{{ list_single_result_yaml.results }}"
- name: test that no json is emitted when no collection paths are usable
command: "ansible-galaxy collection list --format json"
register: list_result_error
ignore_errors: True
environment:
ANSIBLE_COLLECTIONS_PATH: ""
- assert:
that:
- "'{}' not in list_result_error.stdout"
- name: install an artifact to the second collections path
command: ansible-galaxy collection install namespace1.name1 -s galaxy_ng {{ galaxy_verbosity }} -p "{{ galaxy_dir }}/prod"
environment:
ANSIBLE_CONFIG: '{{ galaxy_dir }}/ansible.cfg'
- name: replace the artifact version
lineinfile:
path: "{{ galaxy_dir }}/prod/ansible_collections/namespace1/name1/MANIFEST.json"
line: ' "version": null,'
regexp: ' "version": .*'
- name: test listing collections in all paths
command: ansible-galaxy collection list {{ galaxy_verbosity }}
register: list_result
ignore_errors: True
environment:
ANSIBLE_COLLECTIONS_PATH: "{{ galaxy_dir }}/dev:{{ galaxy_dir }}/prod"
- assert:
that:
- list_result is failed
- "'is expected to have a valid SemVer version value but got None' in list_result.stderr"
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 70,180 |
Fatal error in ansible-galaxy collection list due to common SCM collection practice
|
##### SUMMARY
The command `ansible-galaxy collection list` should not _error_ when parsing a particular collection, because this prevents displaying a list of _all other_ collections.
This happens when simply using the source checkout of `community.vmware`.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
lib/ansible/cli/galaxy.py
##### ANSIBLE VERSION
```paste below
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
```
86606d3ca707e8836d2b956694bb55d3ae282aee
##### CONFIGURATION
```paste below
ansible-config dump --only-changed
```
(collection path modification in steps)
##### OS / ENVIRONMENT
N/A
##### STEPS TO REPRODUCE
```
base_dir=awx/plugins/collections/ansible_collections
mkdir -p $base_dir/community
git clone https://github.com/ansible-collections/vmware.git $base_dir/community/vmware
```
on the revision of this collection `2b277d44fa664443be679c13a66bdf2c60a75093`
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
```
##### EXPECTED RESULTS
It should list all the collections in the folder `awx/plugins/collections`, and show `*` as the version of `community.vmware`, since maintainers do not necessarily include the version in their `galaxy.yml`.
In this particular case, the file is:
```yaml
---
namespace: community
name: vmware
# the version key is generated during the release by Zuul
# https://github.com/ansible-network/releases/tree/master/ansible_releases/cmd
# A script based on https://pypi.org/project/pbr/ will generate the version
# key. The version value depends on the tag or the last git tag.
readme: README.md
authors:
- Ansible (https://github.com/ansible)
description:
license_file: LICENSE
tags:
- cloud
- vmware
- virtualization
dependencies: {}
```
Of course they're doing this; they're using tags and CI in their release process. As an example, here is a collection with _no_ `galaxy.yml`:
```
# /Users/alancoding/.ansible/collections/ansible_collections
Collection Version
---------------------- -----------
chrismeyersfsu.hax0r *
```
It should show the same thing for the vmware collection source checkout
##### ACTUAL RESULTS
```
ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list -vvv
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under
development. This is a rapidly changing source of code and can become unstable at any point.
ansible-galaxy 2.11.0.dev0
config file = None
configured module search path = ['/Users/alancoding/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/alancoding/Documents/repos/ansible/lib/ansible
executable location = /Users/alancoding/.virtualenvs/awx_collection/bin/ansible-galaxy
python version = 3.7.7 (default, Mar 10 2020, 15:43:03) [Clang 11.0.0 (clang-1100.0.33.17)]
No config file found; using defaults
[DEPRECATION WARNING]: ANSIBLE_COLLECTIONS_PATHS option, all PATH-type options are singular PATH , use the "ANSIBLE_COLLECTIONS_PATH" environment variable instead. This feature
will be removed in version 2.14. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.
Searching /Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections for collections
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.1.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.3.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.tox' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/build_artifact/openstack-cloud-0.0.1-dev82.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/alancoding-cloud-0.0.2.tar.gz' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud/galaxy.yml' for collection build
Found installed collection alancoding.cloud:0.0.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/openstack/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws/galaxy.yml' for collection build
Found installed collection amazon.aws:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/amazon/aws'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/doc_fragments/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection/galaxy.yml' for collection build
Found installed collection azure.azcollection:0.1.3 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/azure/azcollection'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/plugins/inventory/__pycache__' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud/galaxy.yml' for collection build
Found installed collection google.cloud:0.0.9 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/google/cloud'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix/galaxy.yml' for collection build
Found installed collection ansible.posix:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/posix'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon/galaxy.yml' for collection build
Found installed collection ansible.amazon:0.1.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/ansible/amazon'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman/galaxy.yml' for collection build
Found installed collection theforeman.foreman:0.7.0 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/theforeman/foreman'
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/.git' for collection build
Skipping '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general/galaxy.yml' for collection build
Found installed collection community.general:0.1.1 at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/general'
ERROR! The collection galaxy.yml at '/Users/alancoding/Documents/tower/awx/plugins/collections/ansible_collections/community/vmware/galaxy.yml' is missing the following mandatory keys: version
```
It's bad that the command terminates so fragilely. This also doesn't jibe with the common, published policies of first-class collections.
It's also bad that it won't give a traceback for the error that happened.
FYI to @Akasurde (I'd expect no change in vmware collection) @jborean93 (most likely expect change in CLI error handling)
|
https://github.com/ansible/ansible/issues/70180
|
https://github.com/ansible/ansible/pull/76596
|
9b79d6ba3582bb0ef339738b7e1ade879d00dfe3
|
05608b20e8f875d51866a184f8c579fe60498e05
| 2020-06-19T19:09:14Z |
python
| 2022-08-02T15:46:47Z |
test/units/galaxy/test_collection.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import pytest
import re
import tarfile
import tempfile
import uuid
from hashlib import sha256
from io import BytesIO
from unittest.mock import MagicMock, mock_open, patch
import ansible.constants as C
from ansible import context
from ansible.cli.galaxy import GalaxyCLI, SERVER_DEF
from ansible.errors import AnsibleError
from ansible.galaxy import api, collection, token
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six.moves import builtins
from ansible.utils import context_objects as co
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash_s
@pytest.fixture(autouse=True)
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_input(tmp_path_factory):
''' Creates a collection skeleton directory for build tests '''
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
galaxy_args = ['ansible-galaxy', 'collection', 'init', '%s.%s' % (namespace, collection),
'-c', '--init-path', test_dir, '--collection-skeleton', skeleton]
GalaxyCLI(args=galaxy_args).run()
collection_dir = os.path.join(test_dir, namespace, collection)
output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Output'))
return collection_dir, output_dir
@pytest.fixture()
def collection_artifact(monkeypatch, tmp_path_factory):
''' Creates a temp collection artifact and mocked open_url instance for publishing tests '''
mock_open = MagicMock()
monkeypatch.setattr(collection.concrete_artifact_manager, 'open_url', mock_open)
mock_uuid = MagicMock()
mock_uuid.return_value.hex = 'uuid'
monkeypatch.setattr(uuid, 'uuid4', mock_uuid)
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
input_file = to_text(tmp_path / 'collection.tar.gz')
with tarfile.open(input_file, 'w:gz') as tfile:
b_io = BytesIO(b"\x00\x01\x02\x03")
tar_info = tarfile.TarInfo('test')
tar_info.size = 4
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
return input_file, mock_open
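# uuid.uuid4 is patched above so anything derived from it while publishing
# (for example a multipart form boundary) is deterministic across test runs.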
@pytest.fixture()
def galaxy_yml_dir(request, tmp_path_factory):
b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
b_galaxy_yml = os.path.join(b_test_dir, b'galaxy.yml')
with open(b_galaxy_yml, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(request.param))
yield b_test_dir
@pytest.fixture()
def tmp_tarfile(tmp_path_factory, manifest_info):
''' Creates a temporary tar file for _extract_tar_file tests '''
filename = u'ÅÑŚÌβŁÈ'
temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename)))
tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename))
data = os.urandom(8)
with tarfile.open(tar_file, 'w:gz') as tfile:
b_io = BytesIO(data)
tar_info = tarfile.TarInfo(filename)
tar_info.size = len(data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
b_data = to_bytes(json.dumps(manifest_info, indent=True), errors='surrogate_or_strict')
b_io = BytesIO(b_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(b_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
sha256_hash = sha256()
sha256_hash.update(data)
with tarfile.open(tar_file, 'r') as tfile:
yield temp_dir, tfile, filename, sha256_hash.hexdigest()
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com',
token=token.GalaxyToken(token='key'))
return galaxy_api
@pytest.fixture()
def manifest_template():
def get_manifest_info(namespace='ansible_namespace', name='collection', version='0.1.0'):
return {
"collection_info": {
"namespace": namespace,
"name": name,
"version": version,
"authors": [
"shertel"
],
"readme": "README.md",
"tags": [
"test",
"collection"
],
"description": "Test",
"license": [
"MIT"
],
"license_file": None,
"dependencies": {},
"repository": "https://github.com/{0}/{1}".format(namespace, name),
"documentation": None,
"homepage": None,
"issues": None
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "files_manifest_checksum",
"format": 1
},
"format": 1
}
return get_manifest_info
@pytest.fixture()
def manifest_info(manifest_template):
return manifest_template()
@pytest.fixture()
def files_manifest_info():
return {
"files": [
{
"name": ".",
"ftype": "dir",
"chksum_type": None,
"chksum_sha256": None,
"format": 1
},
{
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "individual_file_checksum",
"format": 1
}
],
"format": 1}
@pytest.fixture()
def manifest(manifest_info):
b_data = to_bytes(json.dumps(manifest_info))
with patch.object(builtins, 'open', mock_open(read_data=b_data)) as m:
with open('MANIFEST.json', mode='rb') as fake_file:
yield fake_file, sha256(b_data).hexdigest()
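# Patching builtins.open with mock_open hands tests a fake MANIFEST.json file
# object plus its expected sha256 digest, without touching the filesystem.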
@pytest.fixture()
def server_config(monkeypatch):
monkeypatch.setattr(C, 'GALAXY_SERVER_LIST', ['server1', 'server2', 'server3'])
default_options = dict((k, None) for k, v, t in SERVER_DEF)
server1 = dict(default_options)
server1.update({'url': 'https://galaxy.ansible.com/api/', 'validate_certs': False})
server2 = dict(default_options)
server2.update({'url': 'https://galaxy.ansible.com/api/', 'validate_certs': True})
server3 = dict(default_options)
server3.update({'url': 'https://galaxy.ansible.com/api/'})
return server1, server2, server3
@pytest.mark.parametrize(
'required_signature_count,valid',
[
("1", True),
("+1", True),
("all", True),
("+all", True),
("-1", False),
("invalid", False),
("1.5", False),
("+", False),
]
)
def test_cli_options(required_signature_count, valid, monkeypatch):
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
'--keyring',
'~/.ansible/pubring.kbx',
'--required-valid-signature-count',
required_signature_count
]
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
if valid:
galaxy_cli.run()
else:
        with pytest.raises(SystemExit, match='2'):
galaxy_cli.run()
@pytest.mark.parametrize(
"config,server",
[
(
# Options to create ini config
{
'url': 'https://galaxy.ansible.com',
'validate_certs': 'False',
'v3': 'False',
},
# Expected server attributes
{
'validate_certs': False,
'_available_api_versions': {},
},
),
(
{
'url': 'https://galaxy.ansible.com',
'validate_certs': 'True',
'v3': 'True',
},
{
'validate_certs': True,
'_available_api_versions': {'v3': '/v3'},
},
),
],
)
def test_bool_type_server_config_options(config, server, monkeypatch):
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
]
config_lines = [
"[galaxy]",
"server_list=server1\n",
"[galaxy_server.server1]",
"url=%s" % config['url'],
"v3=%s" % config['v3'],
"validate_certs=%s\n" % config['validate_certs'],
]
with tempfile.NamedTemporaryFile(suffix='.cfg') as tmp_file:
tmp_file.write(
to_bytes('\n'.join(config_lines))
)
tmp_file.flush()
with patch.object(C, 'GALAXY_SERVER_LIST', ['server1']):
with patch.object(C.config, '_config_file', tmp_file.name):
C.config._parse_config_file()
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
galaxy_cli.run()
assert galaxy_cli.api_servers[0].name == 'server1'
assert galaxy_cli.api_servers[0].validate_certs == server['validate_certs']
assert galaxy_cli.api_servers[0]._available_api_versions == server['_available_api_versions']
@pytest.mark.parametrize('global_ignore_certs', [True, False])
def test_validate_certs(global_ignore_certs, monkeypatch):
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
]
if global_ignore_certs:
cli_args.append('--ignore-certs')
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
galaxy_cli.run()
assert len(galaxy_cli.api_servers) == 1
assert galaxy_cli.api_servers[0].validate_certs is not global_ignore_certs
@pytest.mark.parametrize('global_ignore_certs', [True, False])
def test_validate_certs_with_server_url(global_ignore_certs, monkeypatch):
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
'-s',
'https://galaxy.ansible.com'
]
if global_ignore_certs:
cli_args.append('--ignore-certs')
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
galaxy_cli.run()
assert len(galaxy_cli.api_servers) == 1
assert galaxy_cli.api_servers[0].validate_certs is not global_ignore_certs
@pytest.mark.parametrize('global_ignore_certs', [True, False])
def test_validate_certs_with_server_config(global_ignore_certs, server_config, monkeypatch):
# test sidesteps real resolution and forces the server config to override the cli option
get_plugin_options = MagicMock(side_effect=server_config)
monkeypatch.setattr(C.config, 'get_plugin_options', get_plugin_options)
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
]
if global_ignore_certs:
cli_args.append('--ignore-certs')
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
galaxy_cli.run()
# server cfg, so should match def above, if not specified so it should use default (true)
assert galaxy_cli.api_servers[0].validate_certs is server_config[0].get('validate_certs', True)
assert galaxy_cli.api_servers[1].validate_certs is server_config[1].get('validate_certs', True)
assert galaxy_cli.api_servers[2].validate_certs is server_config[2].get('validate_certs', True)
def test_build_collection_no_galaxy_yaml():
fake_path = u'/fake/ÅÑŚÌβŁÈ/path'
expected = to_native("The collection galaxy.yml path '%s/galaxy.yml' does not exist." % fake_path)
with pytest.raises(AnsibleError, match=expected):
collection.build_collection(fake_path, u'output', False)
def test_build_existing_output_file(collection_input):
input_dir, output_dir = collection_input
existing_output_dir = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
os.makedirs(existing_output_dir)
expected = "The output collection artifact '%s' already exists, but is a directory - aborting" \
% to_native(existing_output_dir)
with pytest.raises(AnsibleError, match=expected):
collection.build_collection(to_text(input_dir, errors='surrogate_or_strict'), to_text(output_dir, errors='surrogate_or_strict'), False)
def test_build_existing_output_without_force(collection_input):
input_dir, output_dir = collection_input
existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
with open(existing_output, 'w+') as out_file:
out_file.write("random garbage")
out_file.flush()
expected = "The file '%s' already exists. You can use --force to re-create the collection artifact." \
% to_native(existing_output)
with pytest.raises(AnsibleError, match=expected):
collection.build_collection(to_text(input_dir, errors='surrogate_or_strict'), to_text(output_dir, errors='surrogate_or_strict'), False)
def test_build_existing_output_with_force(collection_input):
input_dir, output_dir = collection_input
existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
with open(existing_output, 'w+') as out_file:
out_file.write("random garbage")
out_file.flush()
collection.build_collection(to_text(input_dir, errors='surrogate_or_strict'), to_text(output_dir, errors='surrogate_or_strict'), True)
# Verify the file was replaced with an actual tar file
assert tarfile.is_tarfile(existing_output)
def test_build_with_existing_files_and_manifest(collection_input):
input_dir, output_dir = collection_input
with open(os.path.join(input_dir, 'MANIFEST.json'), "wb") as fd:
fd.write(b'{"collection_info": {"version": "6.6.6"}, "version": 1}')
with open(os.path.join(input_dir, 'FILES.json'), "wb") as fd:
fd.write(b'{"files": [], "format": 1}')
with open(os.path.join(input_dir, "plugins", "MANIFEST.json"), "wb") as fd:
fd.write(b"test data that should be in build")
collection.build_collection(to_text(input_dir, errors='surrogate_or_strict'), to_text(output_dir, errors='surrogate_or_strict'), False)
output_artifact = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
assert tarfile.is_tarfile(output_artifact)
with tarfile.open(output_artifact, mode='r') as actual:
members = actual.getmembers()
manifest_file = next(m for m in members if m.path == "MANIFEST.json")
manifest_file_obj = actual.extractfile(manifest_file.name)
manifest_file_text = manifest_file_obj.read()
manifest_file_obj.close()
assert manifest_file_text != b'{"collection_info": {"version": "6.6.6"}, "version": 1}'
        json_file = next(m for m in members if m.path == "FILES.json")
json_file_obj = actual.extractfile(json_file.name)
json_file_text = json_file_obj.read()
json_file_obj.close()
assert json_file_text != b'{"files": [], "format": 1}'
sub_manifest_file = next(m for m in members if m.path == "plugins/MANIFEST.json")
sub_manifest_file_obj = actual.extractfile(sub_manifest_file.name)
sub_manifest_file_text = sub_manifest_file_obj.read()
sub_manifest_file_obj.close()
assert sub_manifest_file_text == b"test data that should be in build"
@pytest.mark.parametrize('galaxy_yml_dir', [b'namespace: value: broken'], indirect=True)
def test_invalid_yaml_galaxy_file(galaxy_yml_dir):
galaxy_file = os.path.join(galaxy_yml_dir, b'galaxy.yml')
expected = to_native(b"Failed to parse the galaxy.yml at '%s' with the following error:" % galaxy_file)
with pytest.raises(AnsibleError, match=expected):
collection.concrete_artifact_manager._get_meta_from_src_dir(galaxy_yml_dir)
@pytest.mark.parametrize('galaxy_yml_dir', [b'namespace: test_namespace'], indirect=True)
def test_missing_required_galaxy_key(galaxy_yml_dir):
galaxy_file = os.path.join(galaxy_yml_dir, b'galaxy.yml')
expected = "The collection galaxy.yml at '%s' is missing the following mandatory keys: authors, name, " \
"readme, version" % to_native(galaxy_file)
with pytest.raises(AnsibleError, match=expected):
collection.concrete_artifact_manager._get_meta_from_src_dir(galaxy_yml_dir)
@pytest.mark.parametrize('galaxy_yml_dir', [b"""
namespace: namespace
name: collection
authors: Jordan
version: 0.1.0
readme: README.md
invalid: value"""], indirect=True)
def test_warning_extra_keys(galaxy_yml_dir, monkeypatch):
display_mock = MagicMock()
monkeypatch.setattr(Display, 'warning', display_mock)
collection.concrete_artifact_manager._get_meta_from_src_dir(galaxy_yml_dir)
assert display_mock.call_count == 1
assert display_mock.call_args[0][0] == "Found unknown keys in collection galaxy.yml at '%s/galaxy.yml': invalid"\
% to_text(galaxy_yml_dir)
@pytest.mark.parametrize('galaxy_yml_dir', [b"""
namespace: namespace
name: collection
authors: Jordan
version: 0.1.0
readme: README.md"""], indirect=True)
def test_defaults_galaxy_yml(galaxy_yml_dir):
actual = collection.concrete_artifact_manager._get_meta_from_src_dir(galaxy_yml_dir)
assert actual['namespace'] == 'namespace'
assert actual['name'] == 'collection'
assert actual['authors'] == ['Jordan']
assert actual['version'] == '0.1.0'
assert actual['readme'] == 'README.md'
assert actual['description'] is None
assert actual['repository'] is None
assert actual['documentation'] is None
assert actual['homepage'] is None
assert actual['issues'] is None
assert actual['tags'] == []
assert actual['dependencies'] == {}
assert actual['license'] == []
@pytest.mark.parametrize('galaxy_yml_dir', [(b"""
namespace: namespace
name: collection
authors: Jordan
version: 0.1.0
readme: README.md
license: MIT"""), (b"""
namespace: namespace
name: collection
authors: Jordan
version: 0.1.0
readme: README.md
license:
- MIT""")], indirect=True)
def test_galaxy_yml_list_value(galaxy_yml_dir):
actual = collection.concrete_artifact_manager._get_meta_from_src_dir(galaxy_yml_dir)
assert actual['license'] == ['MIT']
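# Both parametrized documents above normalize to the same result: a bare
# string license in galaxy.yml is coerced into a single-element list.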
def test_build_ignore_files_and_folders(collection_input, monkeypatch):
input_dir = collection_input[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_display)
git_folder = os.path.join(input_dir, '.git')
retry_file = os.path.join(input_dir, 'ansible.retry')
tests_folder = os.path.join(input_dir, 'tests', 'output')
tests_output_file = os.path.join(tests_folder, 'result.txt')
os.makedirs(git_folder)
os.makedirs(tests_folder)
with open(retry_file, 'w+') as ignore_file:
ignore_file.write('random')
ignore_file.flush()
with open(tests_output_file, 'w+') as tests_file:
tests_file.write('random')
tests_file.flush()
actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
assert actual['format'] == 1
for manifest_entry in actual['files']:
assert manifest_entry['name'] not in ['.git', 'ansible.retry', 'galaxy.yml', 'tests/output', 'tests/output/result.txt']
expected_msgs = [
"Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir),
"Skipping '%s' for collection build" % to_text(retry_file),
"Skipping '%s' for collection build" % to_text(git_folder),
"Skipping '%s' for collection build" % to_text(tests_folder),
]
assert mock_display.call_count == 4
assert mock_display.mock_calls[0][1][0] in expected_msgs
assert mock_display.mock_calls[1][1][0] in expected_msgs
assert mock_display.mock_calls[2][1][0] in expected_msgs
assert mock_display.mock_calls[3][1][0] in expected_msgs
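# Here and in the similar build tests below, skip messages are checked by
# membership rather than position because os.walk ordering (and therefore
# the message order) is not guaranteed across platforms.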
def test_build_ignore_older_release_in_root(collection_input, monkeypatch):
input_dir = collection_input[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_display)
# This is expected to be ignored because it is in the root collection dir.
release_file = os.path.join(input_dir, 'namespace-collection-0.0.0.tar.gz')
# This is not expected to be ignored because it is not in the root collection dir.
fake_release_file = os.path.join(input_dir, 'plugins', 'namespace-collection-0.0.0.tar.gz')
for filename in [release_file, fake_release_file]:
with open(filename, 'w+') as file_obj:
file_obj.write('random')
file_obj.flush()
actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
assert actual['format'] == 1
plugin_release_found = False
for manifest_entry in actual['files']:
assert manifest_entry['name'] != 'namespace-collection-0.0.0.tar.gz'
if manifest_entry['name'] == 'plugins/namespace-collection-0.0.0.tar.gz':
plugin_release_found = True
assert plugin_release_found
expected_msgs = [
"Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir),
"Skipping '%s' for collection build" % to_text(release_file)
]
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] in expected_msgs
assert mock_display.mock_calls[1][1][0] in expected_msgs
def test_build_ignore_patterns(collection_input, monkeypatch):
input_dir = collection_input[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'vvv', mock_display)
actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection',
['*.md', 'plugins/action', 'playbooks/*.j2'])
assert actual['format'] == 1
expected_missing = [
'README.md',
'docs/My Collection.md',
'plugins/action',
'playbooks/templates/test.conf.j2',
'playbooks/templates/subfolder/test.conf.j2',
]
# Files or dirs that are close to a match but are not, make sure they are present
expected_present = [
'docs',
'roles/common/templates/test.conf.j2',
'roles/common/templates/subfolder/test.conf.j2',
]
actual_files = [e['name'] for e in actual['files']]
for m in expected_missing:
assert m not in actual_files
for p in expected_present:
assert p in actual_files
expected_msgs = [
"Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir),
"Skipping '%s/README.md' for collection build" % to_text(input_dir),
"Skipping '%s/docs/My Collection.md' for collection build" % to_text(input_dir),
"Skipping '%s/plugins/action' for collection build" % to_text(input_dir),
"Skipping '%s/playbooks/templates/test.conf.j2' for collection build" % to_text(input_dir),
"Skipping '%s/playbooks/templates/subfolder/test.conf.j2' for collection build" % to_text(input_dir),
]
assert mock_display.call_count == len(expected_msgs)
assert mock_display.mock_calls[0][1][0] in expected_msgs
assert mock_display.mock_calls[1][1][0] in expected_msgs
assert mock_display.mock_calls[2][1][0] in expected_msgs
assert mock_display.mock_calls[3][1][0] in expected_msgs
assert mock_display.mock_calls[4][1][0] in expected_msgs
assert mock_display.mock_calls[5][1][0] in expected_msgs
def test_build_ignore_symlink_target_outside_collection(collection_input, monkeypatch):
input_dir, outside_dir = collection_input
mock_display = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_display)
link_path = os.path.join(input_dir, 'plugins', 'connection')
os.symlink(outside_dir, link_path)
actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
for manifest_entry in actual['files']:
assert manifest_entry['name'] != 'plugins/connection'
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == "Skipping '%s' as it is a symbolic link to a directory outside " \
"the collection" % to_text(link_path)
def test_build_copy_symlink_target_inside_collection(collection_input):
input_dir = collection_input[0]
os.makedirs(os.path.join(input_dir, 'playbooks', 'roles'))
roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked')
roles_target = os.path.join(input_dir, 'roles', 'linked')
roles_target_tasks = os.path.join(roles_target, 'tasks')
os.makedirs(roles_target_tasks)
with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main:
tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:")
tasks_main.flush()
os.symlink(roles_target, roles_link)
actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
linked_entries = [e for e in actual['files'] if e['name'].startswith('playbooks/roles/linked')]
assert len(linked_entries) == 1
assert linked_entries[0]['name'] == 'playbooks/roles/linked'
assert linked_entries[0]['ftype'] == 'dir'
def test_build_with_symlink_inside_collection(collection_input):
input_dir, output_dir = collection_input
os.makedirs(os.path.join(input_dir, 'playbooks', 'roles'))
roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked')
file_link = os.path.join(input_dir, 'docs', 'README.md')
roles_target = os.path.join(input_dir, 'roles', 'linked')
roles_target_tasks = os.path.join(roles_target, 'tasks')
os.makedirs(roles_target_tasks)
with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main:
tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:")
tasks_main.flush()
os.symlink(roles_target, roles_link)
os.symlink(os.path.join(input_dir, 'README.md'), file_link)
collection.build_collection(to_text(input_dir, errors='surrogate_or_strict'), to_text(output_dir, errors='surrogate_or_strict'), False)
output_artifact = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
assert tarfile.is_tarfile(output_artifact)
with tarfile.open(output_artifact, mode='r') as actual:
members = actual.getmembers()
linked_folder = next(m for m in members if m.path == 'playbooks/roles/linked')
assert linked_folder.type == tarfile.SYMTYPE
assert linked_folder.linkname == '../../roles/linked'
linked_file = next(m for m in members if m.path == 'docs/README.md')
assert linked_file.type == tarfile.SYMTYPE
assert linked_file.linkname == '../README.md'
linked_file_obj = actual.extractfile(linked_file.name)
actual_file = secure_hash_s(linked_file_obj.read())
linked_file_obj.close()
assert actual_file == '63444bfc766154e1bc7557ef6280de20d03fcd81'
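# secure_hash_s appears to default to SHA-1, hence the 40-character hex
# digest above; the symlinked docs/README.md must hash identically to its
# target README.md.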
def test_publish_no_wait(galaxy_server, collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
artifact_path, mock_open = collection_artifact
fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234'
mock_publish = MagicMock()
mock_publish.return_value = fake_import_uri
monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish)
collection.publish_collection(artifact_path, galaxy_server, False, 0)
assert mock_publish.call_count == 1
assert mock_publish.mock_calls[0][1][0] == artifact_path
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == \
"Collection has been pushed to the Galaxy server %s %s, not waiting until import has completed due to " \
"--no-wait being set. Import task results can be found at %s" % (galaxy_server.name, galaxy_server.api_server,
fake_import_uri)
def test_publish_with_wait(galaxy_server, collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
artifact_path, mock_open = collection_artifact
fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234'
mock_publish = MagicMock()
mock_publish.return_value = fake_import_uri
monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish)
mock_wait = MagicMock()
monkeypatch.setattr(galaxy_server, 'wait_import_task', mock_wait)
collection.publish_collection(artifact_path, galaxy_server, True, 0)
assert mock_publish.call_count == 1
assert mock_publish.mock_calls[0][1][0] == artifact_path
assert mock_wait.call_count == 1
assert mock_wait.mock_calls[0][1][0] == '1234'
assert mock_display.mock_calls[0][1][0] == "Collection has been published to the Galaxy server test_server %s" \
% galaxy_server.api_server
def test_find_existing_collections(tmp_path_factory, monkeypatch):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
collection1 = os.path.join(test_dir, 'namespace1', 'collection1')
collection2 = os.path.join(test_dir, 'namespace2', 'collection2')
fake_collection1 = os.path.join(test_dir, 'namespace3', 'collection3')
fake_collection2 = os.path.join(test_dir, 'namespace4')
os.makedirs(collection1)
os.makedirs(collection2)
os.makedirs(os.path.split(fake_collection1)[0])
open(fake_collection1, 'wb+').close()
open(fake_collection2, 'wb+').close()
collection1_manifest = json.dumps({
'collection_info': {
'namespace': 'namespace1',
'name': 'collection1',
'version': '1.2.3',
'authors': ['Jordan Borean'],
'readme': 'README.md',
'dependencies': {},
},
'format': 1,
})
with open(os.path.join(collection1, 'MANIFEST.json'), 'wb') as manifest_obj:
manifest_obj.write(to_bytes(collection1_manifest))
mock_warning = MagicMock()
monkeypatch.setattr(Display, 'warning', mock_warning)
actual = list(collection.find_existing_collections(test_dir, artifacts_manager=concrete_artifact_cm))
assert len(actual) == 2
for actual_collection in actual:
if '%s.%s' % (actual_collection.namespace, actual_collection.name) == 'namespace1.collection1':
assert actual_collection.namespace == 'namespace1'
assert actual_collection.name == 'collection1'
assert actual_collection.ver == '1.2.3'
assert to_text(actual_collection.src) == collection1
else:
assert actual_collection.namespace == 'namespace2'
assert actual_collection.name == 'collection2'
assert actual_collection.ver == '*'
assert to_text(actual_collection.src) == collection2
assert mock_warning.call_count == 1
assert mock_warning.mock_calls[0][1][0] == "Collection at '%s' does not have a MANIFEST.json file, nor has it galaxy.yml: " \
"cannot detect version." % to_text(collection2)
def test_download_file(tmp_path_factory, monkeypatch):
temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
data = b"\x00\x01\x02\x03"
sha256_hash = sha256()
sha256_hash.update(data)
mock_open = MagicMock()
mock_open.return_value = BytesIO(data)
monkeypatch.setattr(collection.concrete_artifact_manager, 'open_url', mock_open)
expected = temp_dir
actual = collection._download_file('http://google.com/file', temp_dir, sha256_hash.hexdigest(), True)
assert actual.startswith(expected)
assert os.path.isfile(actual)
with open(actual, 'rb') as file_obj:
assert file_obj.read() == data
assert mock_open.call_count == 1
assert mock_open.mock_calls[0][1][0] == 'http://google.com/file'
def test_download_file_hash_mismatch(tmp_path_factory, monkeypatch):
temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
data = b"\x00\x01\x02\x03"
mock_open = MagicMock()
mock_open.return_value = BytesIO(data)
monkeypatch.setattr(collection.concrete_artifact_manager, 'open_url', mock_open)
expected = "Mismatch artifact hash with downloaded file"
with pytest.raises(AnsibleError, match=expected):
collection._download_file('http://google.com/file', temp_dir, 'bad', True)
def test_extract_tar_file_invalid_hash(tmp_tarfile):
temp_dir, tfile, filename, dummy = tmp_tarfile
expected = "Checksum mismatch for '%s' inside collection at '%s'" % (to_native(filename), to_native(tfile.name))
with pytest.raises(AnsibleError, match=expected):
collection._extract_tar_file(tfile, filename, temp_dir, temp_dir, "fakehash")
def test_extract_tar_file_missing_member(tmp_tarfile):
temp_dir, tfile, dummy, dummy = tmp_tarfile
expected = "Collection tar at '%s' does not contain the expected file 'missing'." % to_native(tfile.name)
with pytest.raises(AnsibleError, match=expected):
collection._extract_tar_file(tfile, 'missing', temp_dir, temp_dir)
def test_extract_tar_file_missing_parent_dir(tmp_tarfile):
temp_dir, tfile, filename, checksum = tmp_tarfile
output_dir = os.path.join(temp_dir, b'output')
output_file = os.path.join(output_dir, to_bytes(filename))
collection._extract_tar_file(tfile, filename, output_dir, temp_dir, checksum)
os.path.isfile(output_file)
def test_extract_tar_file_outside_dir(tmp_path_factory):
filename = u'ÅÑŚÌβŁÈ'
temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename)))
tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename))
data = os.urandom(8)
tar_filename = '../%s.sh' % filename
with tarfile.open(tar_file, 'w:gz') as tfile:
b_io = BytesIO(data)
tar_info = tarfile.TarInfo(tar_filename)
tar_info.size = len(data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = re.escape("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
% to_native(tar_filename))
with tarfile.open(tar_file, 'r') as tfile:
with pytest.raises(AnsibleError, match=expected):
collection._extract_tar_file(tfile, tar_filename, os.path.join(temp_dir, to_bytes(filename)), temp_dir)
def test_require_one_of_collections_requirements_with_both():
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', 'namespace.collection', '-r', 'requirements.yml'])
with pytest.raises(AnsibleError) as req_err:
cli._require_one_of_collections_requirements(('namespace.collection',), 'requirements.yml')
with pytest.raises(AnsibleError) as cli_err:
cli.run()
assert req_err.value.message == cli_err.value.message == 'The positional collection_name arg and --requirements-file are mutually exclusive.'
def test_require_one_of_collections_requirements_with_neither():
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify'])
with pytest.raises(AnsibleError) as req_err:
cli._require_one_of_collections_requirements((), '')
with pytest.raises(AnsibleError) as cli_err:
cli.run()
assert req_err.value.message == cli_err.value.message == 'You must specify a collection name or a requirements file.'
def test_require_one_of_collections_requirements_with_collections():
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', 'namespace1.collection1', 'namespace2.collection1:1.0.0'])
collections = ('namespace1.collection1', 'namespace2.collection1:1.0.0',)
requirements = cli._require_one_of_collections_requirements(collections, '')['collections']
req_tuples = [('%s.%s' % (req.namespace, req.name), req.ver, req.src, req.type,) for req in requirements]
assert req_tuples == [('namespace1.collection1', '*', None, 'galaxy'), ('namespace2.collection1', '1.0.0', None, 'galaxy')]
@patch('ansible.cli.galaxy.GalaxyCLI._parse_requirements_file')
def test_require_one_of_collections_requirements_with_requirements(mock_parse_requirements_file, galaxy_server):
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', '-r', 'requirements.yml', 'namespace.collection'])
mock_parse_requirements_file.return_value = {'collections': [('namespace.collection', '1.0.5', galaxy_server)]}
requirements = cli._require_one_of_collections_requirements((), 'requirements.yml')['collections']
assert mock_parse_requirements_file.call_count == 1
assert requirements == [('namespace.collection', '1.0.5', galaxy_server)]
@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify', spec=True)
def test_call_GalaxyCLI(execute_verify):
galaxy_args = ['ansible-galaxy', 'collection', 'verify', 'namespace.collection']
GalaxyCLI(args=galaxy_args).run()
assert execute_verify.call_count == 1
@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify')
def test_call_GalaxyCLI_with_implicit_role(execute_verify):
galaxy_args = ['ansible-galaxy', 'verify', 'namespace.implicit_role']
with pytest.raises(SystemExit):
GalaxyCLI(args=galaxy_args).run()
assert not execute_verify.called
@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify')
def test_call_GalaxyCLI_with_role(execute_verify):
galaxy_args = ['ansible-galaxy', 'role', 'verify', 'namespace.role']
with pytest.raises(SystemExit):
GalaxyCLI(args=galaxy_args).run()
assert not execute_verify.called
@patch('ansible.cli.galaxy.verify_collections', spec=True)
def test_execute_verify_with_defaults(mock_verify_collections):
galaxy_args = ['ansible-galaxy', 'collection', 'verify', 'namespace.collection:1.0.4']
GalaxyCLI(args=galaxy_args).run()
assert mock_verify_collections.call_count == 1
print("Call args {0}".format(mock_verify_collections.call_args[0]))
requirements, search_paths, galaxy_apis, ignore_errors = mock_verify_collections.call_args[0]
assert [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type) for r in requirements] == [('namespace.collection', '1.0.4', None, 'galaxy')]
for install_path in search_paths:
assert install_path.endswith('ansible_collections')
assert galaxy_apis[0].api_server == 'https://galaxy.ansible.com'
assert ignore_errors is False
@patch('ansible.cli.galaxy.verify_collections', spec=True)
def test_execute_verify(mock_verify_collections):
GalaxyCLI(args=[
'ansible-galaxy', 'collection', 'verify', 'namespace.collection:1.0.4', '--ignore-certs',
'-p', '~/.ansible', '--ignore-errors', '--server', 'http://galaxy-dev.com',
]).run()
assert mock_verify_collections.call_count == 1
requirements, search_paths, galaxy_apis, ignore_errors = mock_verify_collections.call_args[0]
assert [('%s.%s' % (r.namespace, r.name), r.ver, r.src, r.type) for r in requirements] == [('namespace.collection', '1.0.4', None, 'galaxy')]
for install_path in search_paths:
assert install_path.endswith('ansible_collections')
assert galaxy_apis[0].api_server == 'http://galaxy-dev.com'
assert ignore_errors is True
def test_verify_file_hash_deleted_file(manifest_info):
data = to_bytes(json.dumps(manifest_info))
digest = sha256(data).hexdigest()
namespace = manifest_info['collection_info']['namespace']
name = manifest_info['collection_info']['name']
version = manifest_info['collection_info']['version']
server = 'http://galaxy.ansible.com'
error_queue = []
with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
with patch.object(collection.os.path, 'isfile', MagicMock(return_value=False)) as mock_isfile:
collection._verify_file_hash(b'path/', 'file', digest, error_queue)
mock_isfile.assert_called_once()
assert len(error_queue) == 1
assert error_queue[0].installed is None
assert error_queue[0].expected == digest
def test_verify_file_hash_matching_hash(manifest_info):
data = to_bytes(json.dumps(manifest_info))
digest = sha256(data).hexdigest()
namespace = manifest_info['collection_info']['namespace']
name = manifest_info['collection_info']['name']
version = manifest_info['collection_info']['version']
server = 'http://galaxy.ansible.com'
error_queue = []
with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
with patch.object(collection.os.path, 'isfile', MagicMock(return_value=True)) as mock_isfile:
collection._verify_file_hash(b'path/', 'file', digest, error_queue)
mock_isfile.assert_called_once()
assert error_queue == []
def test_verify_file_hash_mismatching_hash(manifest_info):
data = to_bytes(json.dumps(manifest_info))
digest = sha256(data).hexdigest()
different_digest = 'not_{0}'.format(digest)
namespace = manifest_info['collection_info']['namespace']
name = manifest_info['collection_info']['name']
version = manifest_info['collection_info']['version']
server = 'http://galaxy.ansible.com'
error_queue = []
with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
with patch.object(collection.os.path, 'isfile', MagicMock(return_value=True)) as mock_isfile:
collection._verify_file_hash(b'path/', 'file', different_digest, error_queue)
mock_isfile.assert_called_once()
assert len(error_queue) == 1
assert error_queue[0].installed == digest
assert error_queue[0].expected == different_digest
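# The three tests above cover the verify path: missing file, matching digest,
# and mismatching digest. The digest being compared is just the SHA-256 of the
# file contents; a hypothetical standalone equivalent (reusing this module's
# existing sha256 import):
def _sha256_of_file(path):
    digest = sha256()
    with open(path, 'rb') as src:
        for chunk in iter(lambda: src.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()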
def test_consume_file(manifest):
manifest_file, checksum = manifest
assert checksum == collection._consume_file(manifest_file)
def test_consume_file_and_write_contents(manifest, manifest_info):
manifest_file, checksum = manifest
write_to = BytesIO()
actual_hash = collection._consume_file(manifest_file, write_to)
write_to.seek(0)
assert to_bytes(json.dumps(manifest_info)) == write_to.read()
assert actual_hash == checksum
def test_get_tar_file_member(tmp_tarfile):
temp_dir, tfile, filename, checksum = tmp_tarfile
with collection._get_tar_file_member(tfile, filename) as (tar_file_member, tar_file_obj):
assert isinstance(tar_file_member, tarfile.TarInfo)
assert isinstance(tar_file_obj, tarfile.ExFileObject)
def test_get_nonexistent_tar_file_member(tmp_tarfile):
temp_dir, tfile, filename, checksum = tmp_tarfile
file_does_not_exist = filename + 'nonexistent'
with pytest.raises(AnsibleError) as err:
collection._get_tar_file_member(tfile, file_does_not_exist)
assert to_text(err.value.message) == "Collection tar at '%s' does not contain the expected file '%s'." % (to_text(tfile.name), file_does_not_exist)
def test_get_tar_file_hash(tmp_tarfile):
temp_dir, tfile, filename, checksum = tmp_tarfile
assert checksum == collection._get_tar_file_hash(tfile.name, filename)
def test_get_json_from_tar_file(tmp_tarfile):
temp_dir, tfile, filename, checksum = tmp_tarfile
assert 'MANIFEST.json' in tfile.getnames()
data = collection._get_json_from_tar_file(tfile.name, 'MANIFEST.json')
assert isinstance(data, dict)
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,283 |
CI started to fail because of locale error against devel
|
### Summary
Problem detected on community.grafana collection.
The CI fails because of the following error:
```
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
[Link to the github actions logs](https://github.com/ansible-collections/community.grafana/runs/7376131642?check_suite_focus=true).
Edit: I edited my workflow to let all the jobs run and disable fail-fast. This makes it visible that the issue only happens on devel:
https://github.com/ansible-collections/community.grafana/runs/7376236257?check_suite_focus=true
### Issue Type
Bug Report
### Component Name
core ?
### Ansible Version
```console
devel
(`pip install https://github.com/ansible/ansible/archive/devel.tar.gz`)
```
### Configuration
```console
N/A
```
### OS / Environment
ubuntu 20.04.4
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
On `community.grafana` collection
```yaml (paste below)
ansible-test integration -v --color --retry-on-error --requirements --python 3.8 --continue-on-error --diff --coverage
```
### Expected Results
No error related to locale.
### Actual Results
```console
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
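For illustration only (this is not the merged fix), a defensive pattern that tolerates hosts without the expected locale could look like:

```python
import locale

try:
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
except locale.Error as exc:
    # Fall back to the C/POSIX locale instead of aborting.
    print('ERROR: Ansible could not initialize the preferred locale: %s' % exc)
    locale.setlocale(locale.LC_ALL, 'C')
```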
|
https://github.com/ansible/ansible/issues/78283
|
https://github.com/ansible/ansible/pull/78389
|
27ce607a144917e6b9a453813a7df6bbc9ea2213
|
d8fefba20e8023822749d538db7e69f0fc86710e
| 2022-07-17T07:57:25Z |
python
| 2022-08-02T23:59:14Z |
changelogs/fragments/ansible-test-locale.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,283 |
CI started to fail because of locale error against devel
|
### Summary
Problem detected on community.grafana collection.
The CI fails because of the following error:
```
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
[Link to the github actions logs](https://github.com/ansible-collections/community.grafana/runs/7376131642?check_suite_focus=true).
Edit: I edited my workflow to let all the jobs run and disable fail-fast. This makes it visible that the issue only happens on devel:
https://github.com/ansible-collections/community.grafana/runs/7376236257?check_suite_focus=true
### Issue Type
Bug Report
### Component Name
core ?
### Ansible Version
```console
devel
(`pip install https://github.com/ansible/ansible/archive/devel.tar.gz`)
```
### Configuration
```console
N/A
```
### OS / Environment
ubuntu 20.04.4
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
On `community.grafana` collection
```yaml (paste below)
ansible-test integration -v --color --retry-on-error --requirements --python 3.8 --continue-on-error --diff --coverage
```
### Expected Results
No error related to locale.
### Actual Results
```console
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78283
|
https://github.com/ansible/ansible/pull/78389
|
27ce607a144917e6b9a453813a7df6bbc9ea2213
|
d8fefba20e8023822749d538db7e69f0fc86710e
| 2022-07-17T07:57:25Z |
python
| 2022-08-02T23:59:14Z |
test/lib/ansible_test/_internal/__init__.py
|
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
import typing as t
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main(cli_args=None): # type: (t.Optional[t.List[str]]) -> None
"""Main program function."""
try:
os.chdir(data_context().content.root)
args = parse_args(cli_args)
config = args.config(args) # type: CommonConfig
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.fd = sys.stderr if config.display_stderr else sys.stdout
configure_timeout(config)
display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
delegate_args = None
target_names = None
try:
if config.check_layout:
data_context().check_layout()
args.func(config)
except PrimeContainers:
pass
except ListTargets as ex:
# save target_names for use once we exit the exception handler
target_names = ex.target_names
except Delegate as ex:
# save delegation args for use once we exit the exception handler
delegate_args = (ex.host_state, ex.exclude, ex.require)
if delegate_args:
delegate(config, *delegate_args)
if target_names:
for target_name in target_names:
print(target_name) # display goes to stderr, this should be on stdout
display.review_warnings()
config.success = True
except ApplicationWarning as ex:
display.warning('%s' % ex)
sys.exit(0)
except ApplicationError as ex:
display.fatal('%s' % ex)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
except BrokenPipeError:
sys.exit(3)
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,283 |
CI started to fail because of locale error against devel
|
### Summary
Problem detected on community.grafana collection.
The CI fails because of the following error:
```
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
[Link to the github actions logs](https://github.com/ansible-collections/community.grafana/runs/7376131642?check_suite_focus=true).
Edit: I edited my workflow to let all the jobs run and disable fail-fast. This makes it visible that the issue only happens on devel:
https://github.com/ansible-collections/community.grafana/runs/7376236257?check_suite_focus=true
### Issue Type
Bug Report
### Component Name
core ?
### Ansible Version
```console
devel
(`pip install https://github.com/ansible/ansible/archive/devel.tar.gz`)
```
### Configuration
```console
N/A
```
### OS / Environment
ubuntu 20.04.4
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
On `community.grafana` collection
```yaml (paste below)
ansible-test integration -v --color --retry-on-error --requirements --python 3.8 --continue-on-error --diff --coverage
```
### Expected Results
No error related to locale.
### Actual Results
```console
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78283
|
https://github.com/ansible/ansible/pull/78389
|
27ce607a144917e6b9a453813a7df6bbc9ea2213
|
d8fefba20e8023822749d538db7e69f0fc86710e
| 2022-07-17T07:57:25Z |
python
| 2022-08-02T23:59:14Z |
test/lib/ansible_test/_internal/delegation.py
|
"""Delegate test execution to another environment."""
from __future__ import annotations
import contextlib
import json
import os
import tempfile
import typing as t
from .io import (
make_dirs,
)
from .config import (
CommonConfig,
EnvironmentConfig,
IntegrationConfig,
ShellConfig,
TestConfig,
UnitsConfig,
)
from .util import (
SubprocessError,
display,
filter_args,
ANSIBLE_BIN_PATH,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_ROOT,
OutputStream,
)
from .util_common import (
ResultType,
process_scoped_temporary_directory,
)
from .containers import (
support_container_context,
ContainerDatabase,
)
from .data import (
data_context,
)
from .payload import (
create_payload,
)
from .ci import (
get_ci_provider,
)
from .host_configs import (
OriginConfig,
PythonConfig,
)
from .connections import (
Connection,
DockerConnection,
SshConnection,
LocalConnection,
)
from .provisioning import (
HostState,
)
@contextlib.contextmanager
def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState) -> t.Iterator[None]
"""Context manager for serialized host state during delegation."""
make_dirs(ResultType.TMP.path)
# noinspection PyUnusedLocal
python = host_state.controller_profile.python # make sure the python interpreter has been initialized before serializing host state
del python
with tempfile.TemporaryDirectory(prefix='host-', dir=ResultType.TMP.path) as host_dir:
args.host_settings.serialize(os.path.join(host_dir, 'settings.dat'))
host_state.serialize(os.path.join(host_dir, 'state.dat'))
args.host_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(host_dir))
try:
yield
finally:
args.host_path = None
def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None
"""Delegate execution of ansible-test to another environment."""
assert isinstance(args, EnvironmentConfig)
with delegation_context(args, host_state):
if isinstance(args, TestConfig):
args.metadata.ci_provider = get_ci_provider().code
make_dirs(ResultType.TMP.path)
with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
args.metadata.to_file(args.metadata_path)
try:
delegate_command(args, host_state, exclude, require)
finally:
args.metadata_path = None
else:
delegate_command(args, host_state, exclude, require)
def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None
"""Delegate execution based on the provided host state."""
con = host_state.controller_profile.get_origin_controller_connection()
working_directory = host_state.controller_profile.get_working_directory()
host_delegation = not isinstance(args.controller, OriginConfig)
if host_delegation:
if data_context().content.collection:
content_root = os.path.join(working_directory, data_context().content.collection.directory)
else:
content_root = os.path.join(working_directory, 'ansible')
ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin')
with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file:
create_payload(args, payload_file.name)
con.extract_archive(chdir=working_directory, src=payload_file)
else:
content_root = working_directory
ansible_bin_path = ANSIBLE_BIN_PATH
command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require)
if isinstance(con, SshConnection):
ssh = con.settings
else:
ssh = None
options = []
if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets):
if not args.allow_destructive:
options.append('--allow-destructive')
with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase]
if containers:
options.extend(['--containers', json.dumps(containers.to_dict())])
# Run unit tests unprivileged to prevent stray writes to the source tree.
# Also disconnect from the network once requirements have been installed.
if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection):
pytest_user = 'pytest'
writable_dirs = [
os.path.join(content_root, ResultType.JUNIT.relative_path),
os.path.join(content_root, ResultType.COVERAGE.relative_path),
]
con.run(['mkdir', '-p'] + writable_dirs, capture=True)
con.run(['chmod', '777'] + writable_dirs, capture=True)
con.run(['chmod', '755', working_directory], capture=True)
con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)], capture=True)
con.run(['useradd', pytest_user, '--create-home'], capture=True)
con.run(insert_options(command, options + ['--requirements-mode', 'only']), capture=False)
container = con.inspect()
networks = container.get_network_names()
if networks is not None:
for network in networks:
try:
con.disconnect_network(network)
except SubprocessError:
display.warning(
'Unable to disconnect network "%s" (this is normal under podman). '
'Tests will not be isolated from the network. Network-related tests may '
'misbehave.' % (network,)
)
else:
display.warning('Network disconnection is not supported (this is normal under podman). '
'Tests will not be isolated from the network. Network-related tests may misbehave.')
options.extend(['--requirements-mode', 'skip'])
con.user = pytest_user
success = False
try:
# When delegating, preserve the original separate stdout/stderr streams, but only when the following conditions are met:
# 1) Display output is being sent to stderr. This indicates the output on stdout must be kept separate from stderr.
# 2) The delegation is non-interactive. Interactive mode, which generally uses a TTY, is not compatible with intercepting stdout/stderr.
# The downside to having separate streams is that individual lines of output from each are more likely to appear out-of-order.
output_stream = OutputStream.ORIGINAL if args.display_stderr and not args.interactive else None
con.run(insert_options(command, options), capture=False, interactive=args.interactive, output_stream=output_stream)
success = True
finally:
if host_delegation:
download_results(args, con, content_root, success)
def insert_options(command, options):
"""Insert additional command line options into the given command and return the result."""
result = []
for arg in command:
if options and arg.startswith('--'):
result.extend(options)
options = None
result.append(arg)
return result
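# Illustrative usage (hypothetical values, not from the original module): the
# extra options are spliced in front of the first '--'-prefixed argument:
#     insert_options(['ansible-test', 'units', '--color', 'yes'], ['--truncate', '0'])
#     -> ['ansible-test', 'units', '--truncate', '0', '--color', 'yes']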
def download_results(args, con, content_root, success): # type: (EnvironmentConfig, Connection, str, bool) -> None
"""Download results from a delegated controller."""
remote_results_root = os.path.join(content_root, data_context().content.results_path)
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_test_root = os.path.dirname(remote_results_root)
remote_results_name = os.path.basename(remote_results_root)
make_dirs(local_test_root) # make sure directory exists for collections which have no tests
with tempfile.NamedTemporaryFile(prefix='ansible-test-result-', suffix='.tgz') as result_file:
try:
con.create_archive(chdir=remote_test_root, name=remote_results_name, dst=result_file, exclude=ResultType.TMP.name)
except SubprocessError as ex:
if success:
raise # download errors are fatal if tests succeeded
# surface download failures as a warning here to avoid masking test failures
display.warning(f'Failed to download results while handling an exception: {ex}')
else:
result_file.seek(0)
local_con = LocalConnection(args)
local_con.extract_archive(chdir=local_test_root, src=result_file)
def generate_command(
args, # type: EnvironmentConfig
python, # type: PythonConfig
ansible_bin_path, # type: str
content_root, # type: str
exclude, # type: t.List[str]
require, # type: t.List[str]
): # type: (...) -> t.List[str]
"""Generate the command necessary to delegate ansible-test."""
cmd = [os.path.join(ansible_bin_path, 'ansible-test')]
cmd = [python.path] + cmd
# Force the encoding used during delegation.
# This is only needed because ansible-test relies on Python's file system encoding.
# Environments that do not have the locale configured are thus unable to work with unicode file paths.
# Examples include FreeBSD and some Linux containers.
env_vars = dict(
LC_ALL='en_US.UTF-8',
ANSIBLE_TEST_CONTENT_ROOT=content_root,
)
if isinstance(args.controller, OriginConfig):
# Expose the ansible and ansible_test library directories to the Python environment.
# This is only required when delegation is used on the origin host.
library_path = process_scoped_temporary_directory(args)
os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible'))
os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test'))
env_vars.update(
PYTHONPATH=library_path,
)
# Propagate the TERM environment variable to the remote host when using the shell command.
if isinstance(args, ShellConfig):
term = os.environ.get('TERM')
if term is not None:
env_vars.update(TERM=term)
env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)]
cmd = ['/usr/bin/env'] + env_args + cmd
cmd += list(filter_options(args, args.host_settings.filtered_args, exclude, require))
return cmd
def filter_options(
args, # type: EnvironmentConfig
argv, # type: t.List[str]
exclude, # type: t.List[str]
require, # type: t.List[str]
): # type: (...) -> t.Iterable[str]
"""Return an iterable that filters out unwanted CLI options and injects new ones as requested."""
replace: list[tuple[str, int, t.Optional[t.Union[bool, str, list[str]]]]] = [
('--docker-no-pull', 0, False),
('--truncate', 1, str(args.truncate)),
('--color', 1, 'yes' if args.color else 'no'),
('--redact', 0, False),
('--no-redact', 0, not args.redact),
('--host-path', 1, args.host_path),
]
if isinstance(args, TestConfig):
replace.extend([
('--changed', 0, False),
('--tracked', 0, False),
('--untracked', 0, False),
('--ignore-committed', 0, False),
('--ignore-staged', 0, False),
('--ignore-unstaged', 0, False),
('--changed-from', 1, False),
('--changed-path', 1, False),
('--metadata', 1, args.metadata_path),
('--exclude', 1, exclude),
('--require', 1, require),
('--base-branch', 1, args.base_branch or get_ci_provider().get_base_branch()),
])
pass_through_args: list[str] = []
for arg in filter_args(argv, {option: count for option, count, replacement in replace}):
if arg == '--' or pass_through_args:
pass_through_args.append(arg)
continue
yield arg
for option, _count, replacement in replace:
if not replacement:
continue
if isinstance(replacement, bool):
yield option
elif isinstance(replacement, str):
yield from [option, replacement]
elif isinstance(replacement, list):
for item in replacement:
yield from [option, item]
yield from args.delegate_args
yield from pass_through_args
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,283 |
CI started to fail because of locale error against devel
|
### Summary
Problem detected on community.grafana collection.
The CI fails because of the following error:
```
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
[Link to the github actions logs](https://github.com/ansible-collections/community.grafana/runs/7376131642?check_suite_focus=true).
Edit: I edited my workflow to let all the jobs run and disable fail-fast. This makes it visible that the issue only happens on devel:
https://github.com/ansible-collections/community.grafana/runs/7376236257?check_suite_focus=true
### Issue Type
Bug Report
### Component Name
core ?
### Ansible Version
```console
devel
(`pip install https://github.com/ansible/ansible/archive/devel.tar.gz`)
```
### Configuration
```console
N/A
```
### OS / Environment
ubuntu 20.04.4
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
On `community.grafana` collection
```yaml (paste below)
ansible-test integration -v --color --retry-on-error --requirements --python 3.8 --continue-on-error --diff --coverage
```
### Expected Results
No error related to locale.
### Actual Results
```console
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78283
|
https://github.com/ansible/ansible/pull/78389
|
27ce607a144917e6b9a453813a7df6bbc9ea2213
|
d8fefba20e8023822749d538db7e69f0fc86710e
| 2022-07-17T07:57:25Z |
python
| 2022-08-02T23:59:14Z |
test/lib/ansible_test/_internal/locale_util.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,283 |
CI started to fail because of locale error against devel
|
### Summary
Problem detected on community.grafana collection.
The CI fails because of the following error:
```
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
[Link to the github actions logs](https://github.com/ansible-collections/community.grafana/runs/7376131642?check_suite_focus=true).
Edit: I edited my workflow to let all the jobs run and disable fail-fast. This makes it visible that the issue only happens on devel:
https://github.com/ansible-collections/community.grafana/runs/7376236257?check_suite_focus=true
### Issue Type
Bug Report
### Component Name
core ?
### Ansible Version
```console
devel
(`pip install https://github.com/ansible/ansible/archive/devel.tar.gz`)
```
### Configuration
```console
N/A
```
### OS / Environment
ubuntu 20.04.4
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
On `community.grafana` collection
```yaml (paste below)
ansible-test integration -v --color --retry-on-error --requirements --python 3.8 --continue-on-error --diff --coverage
```
### Expected Results
No error related to locale.
### Actual Results
```console
ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78283
|
https://github.com/ansible/ansible/pull/78389
|
27ce607a144917e6b9a453813a7df6bbc9ea2213
|
d8fefba20e8023822749d538db7e69f0fc86710e
| 2022-07-17T07:57:25Z |
python
| 2022-08-02T23:59:14Z |
test/lib/ansible_test/_internal/util.py
|
"""Miscellaneous utility functions and classes."""
from __future__ import annotations
import abc
import errno
import enum
import fcntl
import importlib.util
import inspect
import json
import keyword
import os
import platform
import pkgutil
import random
import re
import shutil
import stat
import string
import subprocess
import sys
import time
import functools
import shlex
import typing as t
from struct import unpack, pack
from termios import TIOCGWINSZ
try:
from typing_extensions import TypeGuard # TypeGuard was added in Python 3.10
except ImportError:
TypeGuard = None
from .encoding import (
to_bytes,
to_optional_bytes,
to_optional_text,
)
from .io import (
open_binary_file,
read_text_file,
)
from .thread import (
mutex,
WrappedThread,
)
from .constants import (
SUPPORTED_PYTHON_VERSIONS,
)
C = t.TypeVar('C')
TBase = t.TypeVar('TBase')
TKey = t.TypeVar('TKey')
TValue = t.TypeVar('TValue')
PYTHON_PATHS = {} # type: t.Dict[str, str]
COVERAGE_CONFIG_NAME = 'coveragerc'
ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# assume running from install
ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
ANSIBLE_SOURCE_ROOT = None
if not os.path.exists(ANSIBLE_LIB_ROOT):
# running from source
ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
ANSIBLE_TEST_UTIL_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_util')
ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
ANSIBLE_TEST_CONTROLLER_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'controller')
ANSIBLE_TEST_TARGET_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'target')
ANSIBLE_TEST_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'tools')
ANSIBLE_TEST_TARGET_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'tools')
# Modes are set to allow all users the same level of access.
# This permits files to be used in tests that change users.
# The only exception is write access to directories for the user creating them.
# This avoids having to modify the directory permissions a second time.
MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
MODE_FILE = MODE_READ
MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
class OutputStream(enum.Enum):
"""The output stream to use when running a subprocess and redirecting/capturing stdout or stderr."""
ORIGINAL = enum.auto()
AUTO = enum.auto()
def get_buffer(self, original: t.BinaryIO) -> t.BinaryIO:
"""Return the correct output buffer to use, taking into account the given original buffer."""
if self == OutputStream.ORIGINAL:
return original
if self == OutputStream.AUTO:
return display.fd.buffer
raise NotImplementedError(str(self))
class Architecture:
"""
Normalized architecture names.
These are the architectures supported by ansible-test, such as when provisioning remote instances.
"""
X86_64 = 'x86_64'
AARCH64 = 'aarch64'
REMOTE_ARCHITECTURES = list(value for key, value in Architecture.__dict__.items() if not key.startswith('__'))
def is_valid_identifier(value: str) -> bool:
"""Return True if the given value is a valid non-keyword Python identifier, otherwise return False."""
return value.isidentifier() and not keyword.iskeyword(value)
def cache(func): # type: (t.Callable[[], TValue]) -> t.Callable[[], TValue]
"""Enforce exclusive access on a decorated function and cache the result."""
storage = {} # type: t.Dict[None, TValue]
sentinel = object()
@functools.wraps(func)
def cache_func():
"""Cache the return value from func."""
if (value := storage.get(None, sentinel)) is sentinel:
value = storage[None] = func()
return value
wrapper = mutex(cache_func)
return wrapper
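# Illustrative usage: @cache memoizes a zero-argument callable and serializes
# access through the mutex above, so the wrapped function runs at most once
# per process (the decorated function below is hypothetical):
#     @cache
#     def get_expensive_table():
#         return compute_table()   # executed only on the first call
#     get_expensive_table()  # computes and stores the value
#     get_expensive_table()  # returns the cached value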
@mutex
def detect_architecture(python: str) -> t.Optional[str]:
"""Detect the architecture of the specified Python and return a normalized version, or None if it cannot be determined."""
results: t.Dict[str, t.Optional[str]]
try:
results = detect_architecture.results # type: ignore[attr-defined]
except AttributeError:
results = detect_architecture.results = {} # type: ignore[attr-defined]
if python in results:
return results[python]
if python == sys.executable or os.path.realpath(python) == os.path.realpath(sys.executable):
uname = platform.uname()
else:
data = raw_command([python, '-c', 'import json, platform; print(json.dumps(platform.uname()));'], capture=True)[0]
uname = json.loads(data)
translation = {
'x86_64': Architecture.X86_64, # Linux, macOS
'amd64': Architecture.X86_64, # FreeBSD
'aarch64': Architecture.AARCH64, # Linux, FreeBSD
'arm64': Architecture.AARCH64, # FreeBSD
}
candidates = []
if len(uname) >= 5:
candidates.append(uname[4])
if len(uname) >= 6:
candidates.append(uname[5])
candidates = sorted(set(candidates))
architectures = sorted(set(arch for arch in [translation.get(candidate) for candidate in candidates] if arch))
architecture: t.Optional[str] = None
if not architectures:
display.warning(f'Unable to determine architecture for Python interpreter "{python}" from: {candidates}')
elif len(architectures) == 1:
architecture = architectures[0]
display.info(f'Detected architecture {architecture} for Python interpreter: {python}', verbosity=1)
else:
display.warning(f'Conflicting architectures detected ({architectures}) for Python interpreter "{python}" from: {candidates}')
results[python] = architecture
return architecture
def filter_args(args, filters): # type: (t.List[str], t.Dict[str, int]) -> t.List[str]
"""Return a filtered version of the given command line arguments."""
remaining = 0
result = []
for arg in args:
if not arg.startswith('-') and remaining:
remaining -= 1
continue
remaining = 0
parts = arg.split('=', 1)
key = parts[0]
if key in filters:
remaining = filters[key] - len(parts) + 1
continue
result.append(arg)
return result
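# Illustrative usage (hypothetical values): the named options are dropped
# together with the stated number of values, in both '--opt value' and
# '--opt=value' forms:
#     filter_args(['--color', 'yes', '--docker', 'default', 'units'], {'--color': 1})
#     -> ['--docker', 'default', 'units']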
def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str]
"""
Returns lines from the specified text file with comments removed.
Comments are any content from a hash symbol to the end of a line.
Any spaces immediately before a comment are also removed.
"""
if optional and not os.path.exists(path):
return []
lines = read_text_file(path).splitlines()
lines = [re.sub(r' *#.*$', '', line) for line in lines]
if remove_blank_lines:
lines = [line for line in lines if line]
return lines
def exclude_none_values(data): # type: (t.Dict[TKey, t.Optional[TValue]]) -> t.Dict[TKey, TValue]
"""Return the provided dictionary with any None values excluded."""
return dict((key, value) for key, value in data.items() if value is not None)
def find_executable(executable, cwd=None, path=None, required=True): # type: (str, t.Optional[str], t.Optional[str], t.Union[bool, str]) -> t.Optional[str]
"""
Find the specified executable and return the full path, or None if it could not be found.
If required is True an exception will be raised if the executable is not found.
If required is set to 'warning' then a warning will be shown if the executable is not found.
"""
match = None
real_cwd = os.getcwd()
if not cwd:
cwd = real_cwd
if os.path.dirname(executable):
target = os.path.join(cwd, executable)
if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
match = executable
else:
if path is None:
path = os.environ.get('PATH', os.path.defpath)
if path:
path_dirs = path.split(os.path.pathsep)
seen_dirs = set()
for path_dir in path_dirs:
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
if os.path.abspath(path_dir) == real_cwd:
path_dir = cwd
candidate = os.path.join(path_dir, executable)
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
match = candidate
break
if not match and required:
message = 'Required program "%s" not found.' % executable
if required != 'warning':
raise ApplicationError(message)
display.warning(message)
return match
def find_python(version, path=None, required=True): # type: (str, t.Optional[str], bool) -> t.Optional[str]
"""
Find and return the full path to the specified Python version.
If required, an exception will be raised if not found.
If not required, None will be returned if not found.
"""
version_info = str_to_version(version)
if not path and version_info == sys.version_info[:len(version_info)]:
python_bin = sys.executable
else:
python_bin = find_executable('python%s' % version, path=path, required=required)
return python_bin
@cache
def get_ansible_version(): # type: () -> str
"""Return the Ansible version."""
# ansible may not be in our sys.path
# avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation
load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
# noinspection PyUnresolvedReferences
from ansible_release import __version__ as ansible_version # pylint: disable=import-error
return ansible_version
@cache
def get_available_python_versions(): # type: () -> t.Dict[str, str]
"""Return a dictionary indicating which supported Python versions are available."""
return dict((version, path) for version, path in ((version, find_python(version, required=False)) for version in SUPPORTED_PYTHON_VERSIONS) if path)
def raw_command(
cmd, # type: t.Iterable[str]
capture, # type: bool
env=None, # type: t.Optional[t.Dict[str, str]]
data=None, # type: t.Optional[str]
cwd=None, # type: t.Optional[str]
explain=False, # type: bool
stdin=None, # type: t.Optional[t.Union[t.IO[bytes], int]]
stdout=None, # type: t.Optional[t.Union[t.IO[bytes], int]]
interactive=False, # type: bool
output_stream=None, # type: t.Optional[OutputStream]
cmd_verbosity=1, # type: int
str_errors='strict', # type: str
error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]]
): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]]
"""Run the specified command and return stdout and stderr as a tuple."""
output_stream = output_stream or OutputStream.AUTO
if capture and interactive:
raise InternalError('Cannot combine capture=True with interactive=True.')
if data and interactive:
raise InternalError('Cannot combine data with interactive=True.')
if stdin and interactive:
raise InternalError('Cannot combine stdin with interactive=True.')
if stdout and interactive:
raise InternalError('Cannot combine stdout with interactive=True.')
if stdin and data:
raise InternalError('Cannot combine stdin with data.')
if stdout and not capture:
raise InternalError('Redirection of stdout requires capture=True to avoid redirection of stderr to stdout.')
if output_stream != OutputStream.AUTO and capture:
raise InternalError(f'Cannot combine {output_stream=} with capture=True.')
if output_stream != OutputStream.AUTO and interactive:
raise InternalError(f'Cannot combine {output_stream=} with interactive=True.')
if not cwd:
cwd = os.getcwd()
if not env:
env = common_environment()
cmd = list(cmd)
escaped_cmd = shlex.join(cmd)
if capture:
description = 'Run'
elif interactive:
description = 'Interactive'
else:
description = 'Stream'
description += ' command'
with_types = []
if data:
with_types.append('data')
if stdin:
with_types.append('stdin')
if stdout:
with_types.append('stdout')
if with_types:
description += f' with {"/".join(with_types)}'
display.info(f'{description}: {escaped_cmd}', verbosity=cmd_verbosity, truncate=True)
display.info('Working directory: %s' % cwd, verbosity=2)
program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
if program:
display.info('Program found: %s' % program, verbosity=2)
for key in sorted(env.keys()):
display.info('%s=%s' % (key, env[key]), verbosity=2)
if explain:
return None, None
communicate = False
if stdin is not None:
data = None
elif data is not None:
stdin = subprocess.PIPE
communicate = True
elif interactive:
pass # allow the subprocess access to our stdin
else:
stdin = subprocess.DEVNULL
if not interactive:
# When not running interactively, send subprocess stdout/stderr through a pipe.
# This isolates the stdout/stderr of the subprocess from the current process, and also hides the current TTY from it, if any.
# This prevents subprocesses from sharing stdout/stderr with the current process or each other.
# Doing so allows subprocesses to safely make changes to their file handles, such as making them non-blocking (ssh does this).
# This also maintains consistency between local testing and CI systems, which typically do not provide a TTY.
# To maintain output ordering, a single pipe is used for both stdout/stderr when not capturing output unless the output stream is ORIGINAL.
stdout = stdout or subprocess.PIPE
stderr = subprocess.PIPE if capture or output_stream == OutputStream.ORIGINAL else subprocess.STDOUT
communicate = True
else:
stderr = None
start = time.time()
process = None
try:
try:
cmd_bytes = [to_bytes(c) for c in cmd]
env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd) # pylint: disable=consider-using-with
except OSError as ex:
if ex.errno == errno.ENOENT:
raise ApplicationError('Required program "%s" not found.' % cmd[0])
raise
if communicate:
data_bytes = to_optional_bytes(data)
stdout_bytes, stderr_bytes = communicate_with_process(process, data_bytes, stdout == subprocess.PIPE, stderr == subprocess.PIPE, capture=capture,
output_stream=output_stream)
stdout_text = to_optional_text(stdout_bytes, str_errors) or ''
stderr_text = to_optional_text(stderr_bytes, str_errors) or ''
else:
process.wait()
stdout_text, stderr_text = None, None
finally:
if process and process.returncode is None:
process.kill()
display.info('') # the process we're interrupting may have completed a partial line of output
display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
status = process.returncode
runtime = time.time() - start
display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
if status == 0:
return stdout_text, stderr_text
raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime, error_callback)
def communicate_with_process(
process: subprocess.Popen,
stdin: t.Optional[bytes],
stdout: bool,
stderr: bool,
capture: bool,
output_stream: OutputStream,
) -> t.Tuple[bytes, bytes]:
"""Communicate with the specified process, handling stdin/stdout/stderr as requested."""
threads: t.List[WrappedThread] = []
reader: t.Type[ReaderThread]
if capture:
reader = CaptureThread
else:
reader = OutputThread
if stdin is not None:
threads.append(WriterThread(process.stdin, stdin))
if stdout:
stdout_reader = reader(process.stdout, output_stream.get_buffer(sys.stdout.buffer))
threads.append(stdout_reader)
else:
stdout_reader = None
if stderr:
stderr_reader = reader(process.stderr, output_stream.get_buffer(sys.stderr.buffer))
threads.append(stderr_reader)
else:
stderr_reader = None
for thread in threads:
thread.start()
for thread in threads:
try:
thread.wait_for_result()
except Exception as ex: # pylint: disable=broad-except
display.error(str(ex))
if isinstance(stdout_reader, ReaderThread):
stdout_bytes = b''.join(stdout_reader.lines)
else:
stdout_bytes = b''
if isinstance(stderr_reader, ReaderThread):
stderr_bytes = b''.join(stderr_reader.lines)
else:
stderr_bytes = b''
process.wait()
return stdout_bytes, stderr_bytes
class WriterThread(WrappedThread):
"""Thread to write data to stdin of a subprocess."""
def __init__(self, handle: t.IO[bytes], data: bytes) -> None:
super().__init__(self._run)
self.handle = handle
self.data = data
def _run(self) -> None:
"""Workload to run on a thread."""
try:
self.handle.write(self.data)
self.handle.flush()
finally:
self.handle.close()
class ReaderThread(WrappedThread, metaclass=abc.ABCMeta):
"""Thread to read stdout from a subprocess."""
def __init__(self, handle: t.IO[bytes], buffer: t.BinaryIO) -> None:
super().__init__(self._run)
self.handle = handle
self.buffer = buffer
self.lines = [] # type: t.List[bytes]
@abc.abstractmethod
def _run(self) -> None:
"""Workload to run on a thread."""
class CaptureThread(ReaderThread):
"""Thread to capture stdout from a subprocess into a buffer."""
def _run(self) -> None:
"""Workload to run on a thread."""
src = self.handle
dst = self.lines
try:
for line in src:
dst.append(line)
finally:
src.close()
class OutputThread(ReaderThread):
"""Thread to pass stdout from a subprocess to stdout."""
def _run(self) -> None:
"""Workload to run on a thread."""
src = self.handle
dst = self.buffer
try:
for line in src:
dst.write(line)
dst.flush()
finally:
src.close()
def common_environment():
"""Common environment used for executing all programs."""
env = dict(
LC_ALL='en_US.UTF-8',
PATH=os.environ.get('PATH', os.path.defpath),
)
required = (
'HOME',
)
optional = (
'LD_LIBRARY_PATH',
'SSH_AUTH_SOCK',
# MacOS High Sierra Compatibility
# http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
# Example configuration for macOS:
# export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
'ANSIBLE_KEEP_REMOTE_FILES',
# MacOS Homebrew Compatibility
# https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
# This may also be required to install pyyaml with libyaml support when installed in non-standard locations.
# Example configuration for brew on macOS:
# export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
# export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
'LDFLAGS',
'CFLAGS',
)
# FreeBSD Compatibility
# This is required to include libyaml support in PyYAML.
# The header /usr/local/include/yaml.h isn't in the default include path for the compiler.
# It is included here so that tests can take advantage of it, rather than only ansible-test during managed pip installs.
# If CFLAGS has been set in the environment that value will take precedence due to being an optional var when calling pass_vars.
if os.path.exists('/etc/freebsd-update.conf'):
env.update(CFLAGS='-I/usr/local/include/')
env.update(pass_vars(required=required, optional=optional))
return env
def pass_vars(required, optional): # type: (t.Collection[str], t.Collection[str]) -> t.Dict[str, str]
"""Return a filtered dictionary of environment variables based on the current environment."""
env = {}
for name in required:
if name not in os.environ:
raise MissingEnvironmentVariable(name)
env[name] = os.environ[name]
for name in optional:
if name not in os.environ:
continue
env[name] = os.environ[name]
return env
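# Illustrative usage (hypothetical result): selected variables are copied from
# os.environ; any missing required name raises MissingEnvironmentVariable:
#     pass_vars(required=('HOME',), optional=('SSH_AUTH_SOCK',))
#     -> {'HOME': '/root'}  # plus SSH_AUTH_SOCK when it is set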
def remove_tree(path): # type: (str) -> None
"""Remove the specified directory, silently continuing if the directory does not exist."""
try:
shutil.rmtree(to_bytes(path))
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def is_binary_file(path): # type: (str) -> bool
"""Return True if the specified file is a binary file, otherwise return False."""
assume_text = {
'.cfg',
'.conf',
'.crt',
'.cs',
'.css',
'.html',
'.ini',
'.j2',
'.js',
'.json',
'.md',
'.pem',
'.ps1',
'.psm1',
'.py',
'.rst',
'.sh',
'.txt',
'.xml',
'.yaml',
'.yml',
}
assume_binary = {
'.bin',
'.eot',
'.gz',
'.ico',
'.iso',
'.jpg',
'.otf',
'.p12',
'.png',
'.pyc',
'.rpm',
'.ttf',
'.woff',
'.woff2',
'.zip',
}
ext = os.path.splitext(path)[1]
if ext in assume_text:
return False
if ext in assume_binary:
return True
with open_binary_file(path) as path_fd:
return b'\0' in path_fd.read(4096)
def generate_name(length=8): # type: (int) -> str
"""Generate and return a random name."""
return ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(length))
def generate_password(): # type: () -> str
"""Generate and return a random password."""
chars = [
string.ascii_letters,
string.digits,
string.ascii_letters,
string.digits,
'-',
] * 4
password = ''.join([random.choice(char) for char in chars[:-1]])
display.sensitive.add(password)
return password
class Display:
"""Manages color console output."""
clear = '\033[0m'
red = '\033[31m'
green = '\033[32m'
yellow = '\033[33m'
blue = '\033[34m'
purple = '\033[35m'
cyan = '\033[36m'
verbosity_colors = {
0: None,
1: green,
2: blue,
3: cyan,
}
def __init__(self):
self.verbosity = 0
self.color = sys.stdout.isatty()
self.warnings = []
self.warnings_unique = set()
self.fd = sys.stderr # default to stderr until config is initialized to avoid early messages going to stdout
self.rows = 0
self.columns = 0
self.truncate = 0
self.redact = True
self.sensitive = set()
if os.isatty(0):
self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
def __warning(self, message): # type: (str) -> None
"""Internal implementation for displaying a warning message."""
self.print_message('WARNING: %s' % message, color=self.purple)
def review_warnings(self): # type: () -> None
"""Review all warnings which previously occurred."""
if not self.warnings:
return
self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
for warning in self.warnings:
self.__warning(warning)
def warning(self, message, unique=False, verbosity=0): # type: (str, bool, int) -> None
"""Display a warning level message."""
if verbosity > self.verbosity:
return
if unique:
if message in self.warnings_unique:
return
self.warnings_unique.add(message)
self.__warning(message)
self.warnings.append(message)
def notice(self, message): # type: (str) -> None
"""Display a notice level message."""
self.print_message('NOTICE: %s' % message, color=self.purple)
def error(self, message): # type: (str) -> None
"""Display an error level message."""
self.print_message('ERROR: %s' % message, color=self.red)
def fatal(self, message): # type: (str) -> None
"""Display a fatal level message."""
self.print_message('FATAL: %s' % message, color=self.red, stderr=True)
def info(self, message, verbosity=0, truncate=False): # type: (str, int, bool) -> None
"""Display an info level message."""
if self.verbosity >= verbosity:
color = self.verbosity_colors.get(verbosity, self.yellow)
self.print_message(message, color=color, truncate=truncate)
def print_message( # pylint: disable=locally-disabled, invalid-name
self,
message, # type: str
color=None, # type: t.Optional[str]
stderr=False, # type: bool
truncate=False, # type: bool
): # type: (...) -> None
"""Display a message."""
if self.redact and self.sensitive:
for item in self.sensitive:
if not item:
continue
message = message.replace(item, '*' * len(item))
if truncate:
if len(message) > self.truncate > 5:
message = message[:self.truncate - 5] + ' ...'
if color and self.color:
# convert color resets in message to desired color
message = message.replace(self.clear, color)
message = '%s%s%s' % (color, message, self.clear)
fd = sys.stderr if stderr else self.fd
print(message, file=fd)
fd.flush()
class InternalError(Exception):
"""An unhandled internal error indicating a bug in the code."""
def __init__(self, message: str) -> None:
super().__init__(f'An internal error has occurred in ansible-test: {message}')
class ApplicationError(Exception):
"""General application error."""
class ApplicationWarning(Exception):
"""General application warning which interrupts normal program flow."""
class SubprocessError(ApplicationError):
"""Error resulting from failed subprocess execution."""
def __init__(
self,
cmd, # type: t.List[str]
status=0, # type: int
stdout=None, # type: t.Optional[str]
stderr=None, # type: t.Optional[str]
runtime=None, # type: t.Optional[float]
error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]]
): # type: (...) -> None
message = 'Command "%s" returned exit status %s.\n' % (shlex.join(cmd), status)
if stderr:
message += '>>> Standard Error\n'
message += '%s%s\n' % (stderr.strip(), Display.clear)
if stdout:
message += '>>> Standard Output\n'
message += '%s%s\n' % (stdout.strip(), Display.clear)
self.cmd = cmd
self.message = message
self.status = status
self.stdout = stdout
self.stderr = stderr
self.runtime = runtime
if error_callback:
error_callback(self)
self.message = self.message.strip()
super().__init__(self.message)
class MissingEnvironmentVariable(ApplicationError):
"""Error caused by missing environment variable."""
def __init__(self, name): # type: (str) -> None
super().__init__('Missing environment variable: %s' % name)
self.name = name
def retry(func, ex_type=SubprocessError, sleep=10, attempts=10, warn=True):
"""Retry the specified function on failure."""
for dummy in range(1, attempts):
try:
return func()
except ex_type as ex:
if warn:
display.warning(str(ex))
time.sleep(sleep)
return func()
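# Illustrative usage (hypothetical command): each failed attempt is logged as
# a warning and retried after `sleep` seconds; the final attempt runs outside
# the loop so its exception propagates to the caller:
#     retry(lambda: raw_command(['git', 'fetch'], capture=True), attempts=3)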
def parse_to_list_of_dict(pattern, value): # type: (str, str) -> t.List[t.Dict[str, str]]
"""Parse lines from the given value using the specified pattern and return the extracted list of key/value pair dictionaries."""
matched = []
unmatched = []
for line in value.splitlines():
match = re.search(pattern, line)
if match:
matched.append(match.groupdict())
else:
unmatched.append(line)
if unmatched:
raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
return matched
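# Illustrative usage: named groups in the pattern become dictionary keys, and
# any line that does not match raises an exception:
#     parse_to_list_of_dict(r'^(?P<key>\w+)=(?P<value>\w+)$', 'a=1\nb=2')
#     -> [{'key': 'a', 'value': '1'}, {'key': 'b', 'value': '2'}]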
def get_subclasses(class_type): # type: (t.Type[C]) -> t.List[t.Type[C]]
"""Returns a list of types that are concrete subclasses of the given type."""
subclasses = set() # type: t.Set[t.Type[C]]
queue = [class_type] # type: t.List[t.Type[C]]
while queue:
parent = queue.pop()
for child in parent.__subclasses__():
if child not in subclasses:
if not inspect.isabstract(child):
subclasses.add(child)
queue.append(child)
return sorted(subclasses, key=lambda sc: sc.__name__)
def is_subdir(candidate_path, path): # type: (str, str) -> bool
"""Returns true if candidate_path is path or a subdirectory of path."""
if not path.endswith(os.path.sep):
path += os.path.sep
if not candidate_path.endswith(os.path.sep):
candidate_path += os.path.sep
return candidate_path.startswith(path)
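# Illustrative usage: trailing separators are normalized first, so partial
# name matches do not count as subdirectories:
#     is_subdir('/a/b/c', '/a/b')  -> True
#     is_subdir('/a/bc', '/a/b')   -> False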
def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
"""Returns a list of directories extracted from the given list of paths."""
dir_names = set()
for path in paths:
while True:
path = os.path.dirname(path)
if not path or path == os.path.sep:
break
dir_names.add(path + os.path.sep)
return sorted(dir_names)
def str_to_version(version): # type: (str) -> t.Tuple[int, ...]
"""Return a version tuple from a version string."""
return tuple(int(n) for n in version.split('.'))
def version_to_str(version): # type: (t.Tuple[int, ...]) -> str
"""Return a version string from a version tuple."""
return '.'.join(str(n) for n in version)
def sorted_versions(versions): # type: (t.List[str]) -> t.List[str]
"""Return a sorted copy of the given list of versions."""
return [version_to_str(version) for version in sorted(str_to_version(version) for version in versions)]
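# Illustrative usage: versions sort numerically rather than lexicographically:
#     sorted_versions(['3.10', '3.9', '2.17'])  -> ['2.17', '3.9', '3.10']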
def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None
"""
Import plugins from the given directory relative to the given root.
If the root is not provided, the 'lib' directory for the test runner will be used.
"""
if root is None:
root = os.path.dirname(__file__)
path = os.path.join(root, directory)
package = __name__.rsplit('.', 1)[0]
prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.'))
for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py')
load_module(module_path, name)
def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
"""
Load plugins of the specified type and track them in the specified database.
Only plugins which have already been imported will be loaded.
"""
plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]]
for plugin in plugins:
database[plugin] = plugins[plugin]
def load_module(path, name): # type: (str, str) -> None
"""Load a Python module using the given name and path."""
if name in sys.modules:
return
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module
spec.loader.exec_module(module)
def sanitize_host_name(name):
"""Return a sanitized version of the given name, suitable for use as a hostname."""
return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-')
def get_generic_type(base_type, generic_base_type): # type: (t.Type, t.Type[TValue]) -> t.Optional[t.Type[TValue]]
"""Return the generic type arg derived from the generic_base_type type that is associated with the base_type type, if any, otherwise return None."""
# noinspection PyUnresolvedReferences
type_arg = t.get_args(base_type.__orig_bases__[0])[0]
return None if isinstance(type_arg, generic_base_type) else type_arg
def get_type_associations(base_type, generic_base_type): # type: (t.Type[TBase], t.Type[TValue]) -> t.List[t.Tuple[t.Type[TValue], t.Type[TBase]]]
"""Create and return a list of tuples associating generic_base_type derived types with a corresponding base_type derived type."""
return [item for item in [(get_generic_type(sc_type, generic_base_type), sc_type) for sc_type in get_subclasses(base_type)] if item[1]]
def get_type_map(base_type, generic_base_type): # type: (t.Type[TBase], t.Type[TValue]) -> t.Dict[t.Type[TValue], t.Type[TBase]]
"""Create and return a mapping of generic_base_type derived types to base_type derived types."""
return {item[0]: item[1] for item in get_type_associations(base_type, generic_base_type)}
def verify_sys_executable(path): # type: (str) -> t.Optional[str]
"""Verify that the given path references the current Python interpreter. If not, return the expected path, otherwise return None."""
if path == sys.executable:
return None
if os.path.realpath(path) == os.path.realpath(sys.executable):
return None
expected_executable = raw_command([path, '-c', 'import sys; print(sys.executable)'], capture=True)[0]
if expected_executable == sys.executable:
return None
return expected_executable
def type_guard(sequence: t.Sequence[t.Any], guard_type: t.Type[C]) -> TypeGuard[t.Sequence[C]]:
"""
Raises an exception if any item in the given sequence does not match the specified guard type.
Use with assert so that type checkers are aware of the type guard.
"""
invalid_types = set(type(item) for item in sequence if not isinstance(item, guard_type))
if not invalid_types:
return True
invalid_type_names = sorted(str(item) for item in invalid_types)
raise Exception(f'Sequence required to contain only {guard_type} includes: {", ".join(invalid_type_names)}')
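# Typical usage sketch (illustrative; load_items is a hypothetical helper): pair
# with assert so static type checkers narrow the element type while the runtime
# check stays enforced:
#   items = load_items()  # type: t.Sequence[object]
#   assert type_guard(items, str)
#   # from here on, checkers treat items as t.Sequence[str]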
display = Display() # pylint: disable=locally-disabled, invalid-name
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,112 |
Allow ansible -a to parse from json
|
### Summary
The ad hoc `ansible` command parses the value of `-a` using the key=value splitter. It would be good for it to also support parsing JSON, allowing people to specify nested structures and letting the command/win_command/shell/etc. modules support extra options outside the hardcoded list at https://github.com/ansible/ansible/blob/e8a77626a3b33832783433817108cbfbb84227ea/lib/ansible/parsing/splitter.py#L91.
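For illustration, a hedged sketch of what JSON parsing could enable (hypothetical syntax; list-valued options such as `argv` cannot be expressed in k=v form today):
```bash
ansible localhost -m command -a '{"argv": ["ls", "-l", "/tmp"]}'
```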
### Issue Type
Feature Idea
### Component Name
ansible
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78112
|
https://github.com/ansible/ansible/pull/78114
|
d8fefba20e8023822749d538db7e69f0fc86710e
|
ad79c1e0d032eb5dda216055ffc393043de4b380
| 2022-06-21T19:28:58Z |
python
| 2022-08-03T01:24:12Z |
changelogs/fragments/78112-adhoc-args-as-json.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,112 |
Allow ansible -a to parse from json
|
### Summary
The ad hoc `ansible` command parses the value of `-a` using the key=value splitter. It would be good for it to also support parsing JSON, allowing people to specify nested structures and letting the command/win_command/shell/etc. modules support extra options outside the hardcoded list at https://github.com/ansible/ansible/blob/e8a77626a3b33832783433817108cbfbb84227ea/lib/ansible/parsing/splitter.py#L91.
### Issue Type
Feature Idea
### Component Name
ansible
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78112
|
https://github.com/ansible/ansible/pull/78114
|
d8fefba20e8023822749d538db7e69f0fc86710e
|
ad79c1e0d032eb5dda216055ffc393043de4b380
| 2022-06-21T19:28:58Z |
python
| 2022-08-03T01:24:12Z |
docs/docsite/rst/user_guide/intro_adhoc.rst
|
.. _intro_adhoc:
*******************************
Introduction to ad hoc commands
*******************************
An Ansible ad hoc command uses the `/usr/bin/ansible` command-line tool to automate a single task on one or more managed nodes. ad hoc commands are quick and easy, but they are not reusable. So why learn about ad hoc commands first? ad hoc commands demonstrate the simplicity and power of Ansible. The concepts you learn here will port over directly to the playbook language. Before reading and executing these examples, please read :ref:`intro_inventory`.
.. contents::
:local:
Why use ad hoc commands?
========================
ad hoc commands are great for tasks you repeat rarely. For example, if you want to power off all the machines in your lab for Christmas vacation, you could execute a quick one-liner in Ansible without writing a playbook. An ad hoc command looks like this:
.. code-block:: bash
$ ansible [pattern] -m [module] -a "[module options]"
You can learn more about :ref:`patterns<intro_patterns>` and :ref:`modules<working_with_modules>` on other pages.
Use cases for ad hoc tasks
==========================
ad hoc tasks can be used to reboot servers, copy files, manage packages and users, and much more. You can use any Ansible module in an ad hoc task. ad hoc tasks, like playbooks, use a declarative model,
calculating and executing the actions required to reach a specified final state. They
achieve a form of idempotence by checking the current state before they begin and doing nothing unless the current state is different from the specified final state.
Rebooting servers
-----------------
The default module for the ``ansible`` command-line utility is the :ref:`ansible.builtin.command module<command_module>`. You can use an ad hoc task to call the command module and reboot all web servers in Atlanta, 10 at a time. Before Ansible can do this, you must have all servers in Atlanta listed in a group called [atlanta] in your inventory, and you must have working SSH credentials for each machine in that group. To reboot all the servers in the [atlanta] group:
.. code-block:: bash
$ ansible atlanta -a "/sbin/reboot"
By default Ansible uses only 5 simultaneous processes. If you have more hosts than the value set for the fork count, Ansible will talk to them, but it will take a little longer. To reboot the [atlanta] servers with 10 parallel forks:
.. code-block:: bash
$ ansible atlanta -a "/sbin/reboot" -f 10
/usr/bin/ansible will default to running from your user account. To connect as a different user:
.. code-block:: bash
$ ansible atlanta -a "/sbin/reboot" -f 10 -u username
Rebooting probably requires privilege escalation. You can connect to the server as ``username`` and run the command as the ``root`` user by using the :ref:`become <become>` keyword:
.. code-block:: bash
$ ansible atlanta -a "/sbin/reboot" -f 10 -u username --become [--ask-become-pass]
If you add ``--ask-become-pass`` or ``-K``, Ansible prompts you for the password to use for privilege escalation (sudo/su/pfexec/doas/etc).
.. note::
The :ref:`command module <command_module>` does not support extended shell syntax like piping and
redirects (although shell variables will always work). If your command requires shell-specific
syntax, use the `shell` module instead. Read more about the differences on the
:ref:`working_with_modules` page.
So far all our examples have used the default 'command' module. To use a different module, pass ``-m`` for module name. For example, to use the :ref:`ansible.builtin.shell module <shell_module>`:
.. code-block:: bash
$ ansible raleigh -m ansible.builtin.shell -a 'echo $TERM'
When running any command with the Ansible *ad hoc* CLI (as opposed to
:ref:`Playbooks <working_with_playbooks>`), pay particular attention to shell quoting rules, so
the local shell retains the variable and passes it to Ansible.
For example, using double rather than single quotes in the above example would
evaluate the variable on the box you were on.
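A quick sketch of the difference:
.. code-block:: bash
    $ ansible raleigh -m ansible.builtin.shell -a 'echo $TERM'   # $TERM is expanded on the remote host
    $ ansible raleigh -m ansible.builtin.shell -a "echo $TERM"   # $TERM is expanded by your local shell first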
.. _file_transfer:
Managing files
--------------
An ad hoc task can harness the power of Ansible and SCP to transfer many files to multiple machines in parallel. To transfer a file directly to all servers in the [atlanta] group:
.. code-block:: bash
$ ansible atlanta -m ansible.builtin.copy -a "src=/etc/hosts dest=/tmp/hosts"
If you plan to repeat a task like this, use the :ref:`ansible.builtin.template<template_module>` module in a playbook.
The :ref:`ansible.builtin.file<file_module>` module allows changing ownership and permissions on files. These
same options can be passed directly to the ``copy`` module as well:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.file -a "dest=/srv/foo/a.txt mode=600"
$ ansible webservers -m ansible.builtin.file -a "dest=/srv/foo/b.txt mode=600 owner=mdehaan group=mdehaan"
The ``file`` module can also create directories, similar to ``mkdir -p``:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.file -a "dest=/path/to/c mode=755 owner=mdehaan group=mdehaan state=directory"
As well as delete directories (recursively) and delete files:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.file -a "dest=/path/to/c state=absent"
.. _managing_packages:
Managing packages
-----------------
You might also use an ad hoc task to install, update, or remove packages on managed nodes using a package management module such as ``yum``. Package management modules support common functions to install, remove, and generally manage packages. Some specific functions for a package manager might not be present in the Ansible module since they are not part of general package management.
To ensure a package is installed without updating it:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.yum -a "name=acme state=present"
To ensure a specific version of a package is installed:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.yum -a "name=acme-1.5 state=present"
To ensure a package is at the latest version:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.yum -a "name=acme state=latest"
To ensure a package is not installed:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.yum -a "name=acme state=absent"
Ansible has modules for managing packages under many platforms. If there is no module for your package manager, you can install packages using the command module or create a module for your package manager.
.. _users_and_groups:
Managing users and groups
-------------------------
You can create, manage, and remove user accounts on your managed nodes with ad hoc tasks:
.. code-block:: bash
$ ansible all -m ansible.builtin.user -a "name=foo password=<crypted password here>"
$ ansible all -m ansible.builtin.user -a "name=foo state=absent"
See the :ref:`ansible.builtin.user <user_module>` module documentation for details on all of the available options, including
how to manipulate groups and group membership.
.. _managing_services:
Managing services
-----------------
Ensure a service is started on all webservers:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.service -a "name=httpd state=started"
Alternatively, restart a service on all webservers:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.service -a "name=httpd state=restarted"
Ensure a service is stopped:
.. code-block:: bash
$ ansible webservers -m ansible.builtin.service -a "name=httpd state=stopped"
.. _gathering_facts:
Gathering facts
---------------
Facts represent discovered variables about a system. You can use facts to implement conditional execution of tasks but also just to get ad hoc information about your systems. To see all facts:
.. code-block:: bash
$ ansible all -m ansible.builtin.setup
You can also filter this output to display only certain facts, see the :ref:`ansible.builtin.setup <setup_module>` module documentation for details.
Patterns and ad-hoc commands
----------------------------
See the :ref:`patterns <intro_patterns>` documentation for details on all of the available options, including
how to limit using patterns in ad-hoc commands.
Now that you understand the basic elements of Ansible execution, you are ready to learn to automate repetitive tasks using :ref:`Ansible Playbooks <playbooks_intro>`.
.. seealso::
:ref:`intro_configuration`
All about the Ansible config file
:ref:`list_of_collections`
Browse existing collections, modules, and plugins
:ref:`working_with_playbooks`
Using Ansible for configuration management & deployment
`Mailing List <https://groups.google.com/group/ansible-project>`_
Questions? Help? Ideas? Stop by the list on Google Groups
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,112 |
Allow ansible -a to parse from json
|
### Summary
The ad hoc `ansible` command parses the value of `-a` using the key=value splitter. It would be good for it to also support parsing JSON, allowing people to specify nested structures and letting the command/win_command/shell/etc. modules support extra options outside the hardcoded list at https://github.com/ansible/ansible/blob/e8a77626a3b33832783433817108cbfbb84227ea/lib/ansible/parsing/splitter.py#L91.
### Issue Type
Feature Idea
### Component Name
ansible
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78112
|
https://github.com/ansible/ansible/pull/78114
|
d8fefba20e8023822749d538db7e69f0fc86710e
|
ad79c1e0d032eb5dda216055ffc393043de4b380
| 2022-06-21T19:28:58Z |
python
| 2022-08-03T01:24:12Z |
lib/ansible/cli/adhoc.py
|
#!/usr/bin/env python
# Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_text
from ansible.parsing.splitter import parse_kv
from ansible.playbook import Playbook
from ansible.playbook.play import Play
from ansible.utils.display import Display
display = Display()
class AdHocCLI(CLI):
''' is an extra-simple tool/framework/API for doing 'remote things'.
this command allows you to define and run a single task 'playbook' against a set of hosts
'''
name = 'ansible'
def init_parser(self):
''' create an options parser for bin/ansible '''
super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
desc="Define and run a single task 'playbook' against a set of hosts",
epilog="Some actions do not make sense in Ad-Hoc (include, meta, etc)")
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_async_options(self.parser)
opt_help.add_output_options(self.parser)
opt_help.add_connect_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
opt_help.add_tasknoplay_options(self.parser)
# options unique to ansible ad-hoc
self.parser.add_argument('-a', '--args', dest='module_args',
help="The action's options in space separated k=v format: -a 'opt1=val1 opt2=val2'",
default=C.DEFAULT_MODULE_ARGS)
self.parser.add_argument('-m', '--module-name', dest='module_name',
help="Name of the action to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
self.parser.add_argument('args', metavar='pattern', help='host pattern')
def post_process_args(self, options):
'''Post process and validate options for bin/ansible '''
options = super(AdHocCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def _play_ds(self, pattern, async_val, poll):
check_raw = context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS
mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': parse_kv(context.CLIARGS['module_args'], check_raw=check_raw)},
'timeout': context.CLIARGS['task_timeout']}
# avoid adding to tasks that don't support it, unless set, then give user an error
if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any(frozenset((async_val, poll))):
mytask['async_val'] = async_val
mytask['poll'] = poll
return dict(
name="Ansible Ad-Hoc",
hosts=pattern,
gather_facts='no',
tasks=[mytask])
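# Illustrative sketch (not in the original source): for `ansible all -m ping`,
# _play_ds returns roughly:
#   {'name': 'Ansible Ad-Hoc', 'hosts': 'all', 'gather_facts': 'no',
#    'tasks': [{'action': {'module': 'ping', 'args': {}}, 'timeout': 0}]}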
def run(self):
''' create and execute the single task playbook '''
super(AdHocCLI, self).run()
# only thing left should be host pattern
pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
# handle password prompts
sshpass = None
becomepass = None
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# get basic objects
loader, inventory, variable_manager = self._play_prereqs()
# get list of hosts to execute against
try:
hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
except AnsibleError:
if context.CLIARGS['subset']:
raise
else:
hosts = []
display.warning("No hosts matched, nothing to do")
# just listing hosts?
if context.CLIARGS['listhosts']:
display.display(' hosts (%d):' % len(hosts))
for host in hosts:
display.display(' %s' % host)
return 0
# verify we have arguments if we know we need em
if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
err = "No argument passed to %s module" % context.CLIARGS['module_name']
if pattern.endswith(".yml"):
err = err + ' (did you mean to run ansible-playbook?)'
raise AnsibleOptionsError(err)
# Avoid modules that don't work with ad-hoc
if context.CLIARGS['module_name'] in C._ACTION_IMPORT_PLAYBOOK:
raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
% context.CLIARGS['module_name'])
# construct playbook objects to wrap task
play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
# used in start callback
playbook = Playbook(loader)
playbook._entries.append(play)
playbook._file_name = '__adhoc_playbook__'
if self.callback:
cb = self.callback
elif context.CLIARGS['one_line']:
cb = 'oneline'
# Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
cb = C.DEFAULT_STDOUT_CALLBACK
else:
cb = 'minimal'
run_tree = False
if context.CLIARGS['tree']:
C.CALLBACKS_ENABLED.append('tree')
C.TREE_DIR = context.CLIARGS['tree']
run_tree = True
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=run_tree,
forks=context.CLIARGS['forks'],
)
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', playbook)
result = self._tqm.run(play)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
finally:
if self._tqm:
self._tqm.cleanup()
if loader:
loader.cleanup_all_tmp_files()
return result
def main(args=None):
AdHocCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,112 |
Allow ansible -a to parse from json
|
### Summary
The ad hoc `ansible` command parses the value of `-a` using the key=value splitter. It would be good for it to also support parsing JSON, allowing people to specify nested structures and letting the command/win_command/shell/etc. modules support extra options outside the hardcoded list at https://github.com/ansible/ansible/blob/e8a77626a3b33832783433817108cbfbb84227ea/lib/ansible/parsing/splitter.py#L91.
### Issue Type
Feature Idea
### Component Name
ansible
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78112
|
https://github.com/ansible/ansible/pull/78114
|
d8fefba20e8023822749d538db7e69f0fc86710e
|
ad79c1e0d032eb5dda216055ffc393043de4b380
| 2022-06-21T19:28:58Z |
python
| 2022-08-03T01:24:12Z |
test/integration/targets/adhoc/runme.sh
|
#!/usr/bin/env bash
set -eux
# run type tests
ansible -a 'sleep 20' --task-timeout 5 localhost |grep 'The command action failed to execute in the expected time frame (5) and was terminated'
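# A hedged sketch of an extra assertion one might add (hypothetical; assumes the
# adhoc CLI grows JSON support for -a, as proposed in issue 78112):
# ansible -m command -a '{"argv": ["echo", "hello"]}' localhost | grep 'hello'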
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,402 |
Docs: galaxy collection dependency resolution/installation behavior is outdated
|
### Summary
The documentation currently has the following [snippet](https://github.com/ansible/ansible/blob/0afc0b8506cc80897073606157c984c9176f545c/docs/docsite/rst/shared_snippets/galaxy_server_list.txt#L86-L88):
> Once a collection is found, any of its requirements are only searched within the same Galaxy instance as the parent
> collection. The install process will not search for a collection requirement in a different Galaxy instance.
@evgeni reported that this is no longer the case: ansible-galaxy can seek out dependencies on other configured Galaxy instances to support the use case where a collection can depend on a collection from another Galaxy instance.
The behavior was changed in the following pull requests:
- https://github.com/ansible/ansible/pull/72576
- https://github.com/ansible/ansible/pull/72685
Considering the security implications of the change, we should remove the outdated documentation and, more importantly, document the new behavior to make it clear and manage expectations.
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/shared_snippets/galaxy_server_list.txt
### Ansible Version
```console
not relevant
```
### Configuration
```console
not relevant
```
### OS / Environment
not relevant
### Additional Information
not relevant
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76402
|
https://github.com/ansible/ansible/pull/78398
|
733a7679485c84d8ee6c4ebf95c76d172e0369de
|
d92f6da592283482e373c519d12afabe954b31f4
| 2021-11-30T14:33:56Z |
python
| 2022-08-04T16:29:19Z |
docs/docsite/rst/shared_snippets/galaxy_server_list.txt
|
By default, ``ansible-galaxy`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the :file:`ansible.cfg` file under :ref:`galaxy_server`).
You can use either option below to configure ``ansible-galaxy collection`` to use other servers (such as a custom Galaxy server):
* Set the server list in the :ref:`galaxy_server_list` configuration option in :ref:`ansible_configuration_settings_locations`.
* Use the ``--server`` command line argument to limit to an individual server.
To configure a Galaxy server list in ``ansible.cfg``:
#. Add the ``server_list`` option under the ``[galaxy]`` section to one or more server names.
#. Create a new section for each server name.
#. Set the ``url`` option for each server name.
#. Optionally, set the API token for each server name. Go to https://galaxy.ansible.com/me/preferences and click :guilabel:`Show API key`.
.. note::
The ``url`` option for each server name must end with a forward slash ``/``. If you do not set the API token in your Galaxy server list, use the ``--api-key`` argument to pass in the token to the ``ansible-galaxy collection publish`` command.
The following example shows how to configure multiple servers:
.. code-block:: ini
[galaxy]
server_list = my_org_hub, release_galaxy, test_galaxy, my_galaxy_ng
[galaxy_server.my_org_hub]
url=https://automation.my_org/
username=my_user
password=my_pass
[galaxy_server.release_galaxy]
url=https://galaxy.ansible.com/
token=my_token
[galaxy_server.test_galaxy]
url=https://galaxy-dev.ansible.com/
token=my_test_token
[galaxy_server.my_galaxy_ng]
url=http://my_galaxy_ng:8000/api/automation-hub/
auth_url=http://my_keycloak:8080/auth/realms/myco/protocol/openid-connect/token
client_id=galaxy-ng
token=my_keycloak_access_token
.. note::
You can use the ``--server`` command line argument to select an explicit Galaxy server in the ``server_list`` and
the value of this argument should match the name of the server. To use a server not in the server list, set the value to the URL to access that server (all servers in the server list will be ignored). Also, you cannot use the ``--api-key`` argument for any of the predefined servers. You can only use the ``api_key`` argument if you did not define a server list or if you specify a URL in the
``--server`` argument.
**Galaxy server list configuration options**
The :ref:`galaxy_server_list` option is a list of server identifiers in a prioritized order. When searching for a
collection, the install process will search in that order, for example, ``my_org_hub`` first, then ``release_galaxy``, ``test_galaxy``, and
finally ``my_galaxy_ng`` until the collection is found. The actual Galaxy instance is then defined under the section
``[galaxy_server.{{ id }}]`` where ``{{ id }}`` is the server identifier defined in the list. This section can then
define the following keys:
* ``url``: The URL of the Galaxy instance to connect to. Required.
* ``token``: An API token key to use for authentication against the Galaxy instance. Mutually exclusive with ``username``.
* ``username``: The username to use for basic authentication against the Galaxy instance. Mutually exclusive with ``token``.
* ``password``: The password to use, in conjunction with ``username``, for basic authentication.
* ``auth_url``: The URL of a Keycloak server 'token_endpoint' if using SSO authentication (for example, galaxyNG). Mutually exclusive with ``username``. Requires ``token``.
* ``validate_certs``: Whether or not to verify TLS certificates for the Galaxy server. This defaults to True unless the ``--ignore-certs`` option is provided or ``GALAXY_IGNORE_CERTS`` is configured to True.
* ``client_id``: The Keycloak token's client_id to use for authentication. Requires ``auth_url`` and ``token``. The default ``client_id`` is cloud-services to work with Red Hat SSO.
As well as defining these server options in the ``ansible.cfg`` file, you can also define them as environment variables.
The environment variable is in the form ``ANSIBLE_GALAXY_SERVER_{{ id }}_{{ key }}`` where ``{{ id }}`` is the upper
case form of the server identifier and ``{{ key }}`` is the key to define. For example I can define ``token`` for
``release_galaxy`` by setting ``ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_TOKEN=secret_token``.
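For example (a quick sketch):
.. code-block:: bash
    export ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_TOKEN=secret_token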
For operations that use only one Galaxy server (for example, the ``publish``, ``info``, or ``install`` commands), the ``ansible-galaxy collection`` command uses the first entry in the
``server_list``, unless you pass in an explicit server with the ``--server`` argument.
.. note::
Once a collection is found, any of its requirements are only searched within the same Galaxy instance as the parent
collection. The install process will not search for a collection requirement in a different Galaxy instance.
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,400 |
"collection init" command generates wrong file structure
|
### Summary
As is indicated [in the documentation](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_creating.html#creating-collections-skeleton):
```
To start a new collection:
collection_dir#> ansible-galaxy collection init my_namespace.my_collection
```
So, that suggests the collection skeleton will be created in the current folder. Instead, it creates the structure:
`namespace/collection-name/[collection-skeleton]`
This is confusing, because the current directory is not the expected root directory for the rest of the commands, like install or build.
I think the command should create the skeleton in the current directory.
### Issue Type
Bug Report
### Component Name
ansible-galaxy
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.3]
config file = None
configured module search path = ['/home/alex/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/alex/.local/lib/python3.9/site-packages/ansible
ansible collection location = /home/alex/.ansible/collections:/usr/share/ansible/collections
executable location = /home/alex/.local/bin/ansible
python version = 3.9.13 (main, May 18 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)]
jinja version = 2.11.3
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
not relevant
```
### OS / Environment
Fedora 34
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```sh (paste below)
ansible-galaxy collection init some-namespace.some-name
```
### Expected Results
The skeleton should be created in the current directory, instead of creating `namespace/collection-name/[collection-skeleton]`
```
.
├── docs
├── galaxy.yml
├── plugins
│ └── README.md
├── README.md
└── roles
```
### Actual Results
```console
.
└── namespace
└── collection_name
├── docs
├── galaxy.yml
├── plugins
│ └── README.md
├── README.md
└── roles
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78400
|
https://github.com/ansible/ansible/pull/78404
|
98a5820da6f1ad9b17c5e6e331779553573fc881
|
69c7e3f4a3e244b28eef919ff2c1356ea6d06b1c
| 2022-08-01T14:27:32Z |
python
| 2022-08-09T19:13:42Z |
docs/docsite/rst/dev_guide/developing_collections_creating.rst
|
.. _creating_collections:
********************
Creating collections
********************
To create a collection:
#. Create a :ref:`collection skeleton<creating_collections_skeleton>` with the ``collection init`` command.
#. Add modules and other content to the collection.
#. Build the collection into a collection artifact with :ref:`ansible-galaxy collection build<building_collections>`.
#. Publish the collection artifact to Galaxy with :ref:`ansible-galaxy collection publish<publishing_collections>`.
A user can then install your collection on their systems.
.. contents::
:local:
:depth: 2
.. _creating_collections_skeleton:
Creating a collection skeleton
==============================
To start a new collection:
.. code-block:: bash
collection_dir#> ansible-galaxy collection init my_namespace.my_collection
.. note::
Both the namespace and collection names use the same strict set of requirements. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for those requirements.
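Running the ``init`` command above produces a layout like this (a sketch of the default skeleton):
.. code-block:: text
    my_namespace/
    └── my_collection/
        ├── docs/
        ├── galaxy.yml
        ├── plugins/
        │   └── README.md
        ├── README.md
        └── roles/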
Once the skeleton exists, you can populate the directories with the content you want inside the collection. See `ansible-collections <https://github.com/ansible-collections/>`_ GitHub Org to get a better idea of what you can place inside a collection.
Reference: the ``ansible-galaxy collection`` command
====================================================
Currently the ``ansible-galaxy collection`` command implements the following sub commands:
* ``init``: Create a basic collection skeleton based on the default template included with Ansible or your own template.
* ``build``: Create a collection artifact that can be uploaded to Galaxy or your own repository.
* ``publish``: Publish a built collection artifact to Galaxy.
* ``install``: Install one or more collections.
To learn more about the ``ansible-galaxy`` command-line tool, see the :ref:`ansible-galaxy` man page.
.. seealso::
:ref:`collections`
Learn how to install and use collections.
:ref:`collection_structure`
Directories and files included in the collection skeleton
`Mailing List <https://groups.google.com/group/ansible-devel>`_
The development mailing list
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,142 |
ansible.builtin.file fails to change permissions on hard links unless you explicitly define 'state: file'
|
### Summary
When using the file module to iterate over a list of files, some of which could be hardlinks, the file module fails with "src is required for creating new hardlinks." However, I am simply trying to modify the permissions of the files, not create new ones.
For example, I gather the list of files like so (keep in mind some of these are regular files, some of them are hardlinks):
```
- name: Finding all *.crt files in the pki directory
find:
paths: "{{ item }}"
patterns: '*.crt'
loop: "{{ crt_directories }}"
register: crt_find
```
So at this point, `crt_find` is a list of files and hard links.
Then, I iterate over `crt_find` to set the permissions on all of the files and hardlinks using the file module:
```
- name: Setting certificate file Permissions
file:
path: "{{ item.1.path }}"
mode: 0644
loop: "{{ crt_find.results | subelements('files') }}"
```
This works perfectly fine for regular files. When it gets to a hardlink, it fails with "src is required for creating new hardlinks." The problem is, I'm not trying to create a new hardlink. I'm simply trying to modify the permissions.
Interestingly, this goes away if I explicitly define `state: file` as a parameter on the file module task:
```
- name: Setting certificate file Permissions
file:
path: "{{ item.1.path }}"
state: file # <---------------This fixes the problem
mode: 0644
loop: "{{ crt_find.results | subelements('files') }}"
```
While I've clearly found a solution to my problem, it definitely seems like a bug with the file module, as `state: file` is the default if omitted.
### Issue Type
Bug Report
### Component Name
ansible.builtin.file
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /Users/me/.ansible.cfg
configured module search path = ['/Users/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/me/venv/lib/python3.8/site-packages/ansible
ansible collection location = /Users/me/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/me/venv/bin/ansible
python version = 3.8.2 (default, Jun 8 2021, 11:59:35) [Clang 12.0.5 (clang-1205.0.22.11)]
jinja version = 3.0.1
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_ROLES_PATH(/Users/me/.ansible.cfg) = ['/Users/me/repos']
DEFAULT_STDOUT_CALLBACK(env: ANSIBLE_STDOUT_CALLBACK) = yaml
HOST_KEY_CHECKING(/Users/me/.ansible.cfg) = False
```
### OS / Environment
Control machine = MacOS
Target machine = Ubuntu 20
### Steps to Reproduce
Stated in the Summary
### Expected Results
Stated in the Summary
### Actual Results
```console
Stated in the Summary
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76142
|
https://github.com/ansible/ansible/pull/76167
|
69c7e3f4a3e244b28eef919ff2c1356ea6d06b1c
|
3f1838bf918a055b491ab25d6caee2e5022db3c1
| 2021-10-26T14:18:11Z |
python
| 2022-08-10T20:01:31Z |
changelogs/fragments/76167-update-attributes-of-files-that-are-links.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,142 |
ansible.builtin.file fails to change permissions on hard links unless you explicitly define 'state: file'
|
### Summary
When using the file module to iterate over a list of files, some of which could be hardlinks, the file module fails with "src is required for creating new hardlinks." However, I am simply trying to modify the permissions of the files, not create new ones.
For example, I gather the list of files like so (keep in mind some of these are regular files, some of them are hardlinks):
```
- name: Finding all *.crt files in the pki directory
find:
paths: "{{ item }}"
patterns: '*.crt'
loop: "{{ crt_directories }}"
register: crt_find
```
So at this point, `crt_find` is a list of files and hard links.
Then, I iterate over `crt_find` to set the permissions on all of the files and hardlinks using the file module:
```
- name: Setting certificate file Permissions
file:
path: "{{ item.1.path }}"
mode: 0644
loop: "{{ crt_find.results | subelements('files') }}"
```
This works perfectly fine for regular files. When it gets to a hardlink, it fails with "src is required for creating new hardlinks." The problem is, I'm not trying to create a new hardlink. I'm simply trying to modify the permissions.
Interestingly, this goes away if I explicitly define `state: file` as a parameter on the file module task:
```
- name: Setting certificate file Permissions
file:
path: "{{ item.1.path }}"
state: file # <---------------This fixes the problem
mode: 0644
loop: "{{ crt_find.results | subelements('files') }}"
```
While I've clearly found a solution to my problem, it definitely seems like a bug with the file module, as `state: file` is the default if omitted.
### Issue Type
Bug Report
### Component Name
ansible.builtin.file
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /Users/me/.ansible.cfg
configured module search path = ['/Users/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/me/venv/lib/python3.8/site-packages/ansible
ansible collection location = /Users/me/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/me/venv/bin/ansible
python version = 3.8.2 (default, Jun 8 2021, 11:59:35) [Clang 12.0.5 (clang-1205.0.22.11)]
jinja version = 3.0.1
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_ROLES_PATH(/Users/me/.ansible.cfg) = ['/Users/me/repos']
DEFAULT_STDOUT_CALLBACK(env: ANSIBLE_STDOUT_CALLBACK) = yaml
HOST_KEY_CHECKING(/Users/me/.ansible.cfg) = False
```
### OS / Environment
Control machine = MacOS
Target machine = Ubuntu 20
### Steps to Reproduce
Stated in the Summary
### Expected Results
Stated in the Summary
### Actual Results
```console
Stated in the Summary
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76142
|
https://github.com/ansible/ansible/pull/76167
|
69c7e3f4a3e244b28eef919ff2c1356ea6d06b1c
|
3f1838bf918a055b491ab25d6caee2e5022db3c1
| 2021-10-26T14:18:11Z |
python
| 2022-08-10T20:01:31Z |
lib/ansible/modules/file.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: file
version_added: historical
short_description: Manage files and file properties
extends_documentation_fragment: [files, action_common_attributes]
description:
- Set attributes of files, symlinks or directories.
- Alternatively, remove files, symlinks or directories.
- Many other modules support the same options as the C(file) module - including M(ansible.builtin.copy),
M(ansible.builtin.template), and M(ansible.builtin.assemble).
- For Windows targets, use the M(ansible.windows.win_file) module instead.
options:
path:
description:
- Path to the file being managed.
type: path
required: yes
aliases: [ dest, name ]
state:
description:
- If C(absent), directories will be recursively deleted, and files or symlinks will
be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
under C(path_contents). Note that C(absent) will not cause C(file) to fail if the C(path) does
not exist as the state did not change.
- If C(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- If C(file), with no other options, returns the current state of C(path).
- If C(file), even with other options (such as C(mode)), the file will be modified if it exists but will NOT be created if it does not exist.
Set to C(touch) or use the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want to create the file if it does not exist.
- If C(hard), the hard link will be created or changed.
- If C(link), the symbolic link will be created or changed.
- If C(touch) (new in 1.4), an empty file will be created if the file does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
default: file
choices: [ absent, directory, file, hard, link, touch ]
src:
description:
- Path of the file to link to.
- This applies only to C(state=link) and C(state=hard).
- For C(state=link), this will also accept a non-existing path.
- Relative paths are relative to the file being created (C(path)) which is how
the Unix command C(ln -s SRC DEST) treats relative paths.
type: path
recurse:
description:
- Recursively set the specified file attributes on directory contents.
- This applies only when C(state) is set to C(directory).
type: bool
default: no
version_added: '1.1'
force:
description:
- >
Force the creation of the symlinks in two cases: the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
C(path) file and create symlink to the C(src) file in place of it).
type: bool
default: no
follow:
description:
- This flag indicates that filesystem links, if they exist, should be followed.
- Previous to Ansible 2.5, this was C(no) by default.
type: bool
default: yes
version_added: '1.8'
modification_time:
description:
- This parameter indicates the time the file's modification time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is None meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: "2.7"
modification_time_format:
description:
- When used with C(modification_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
access_time:
description:
- This parameter indicates the time the file's access time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is C(None) meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: '2.7'
access_time_format:
description:
- When used with C(access_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
seealso:
- module: ansible.builtin.assemble
- module: ansible.builtin.copy
- module: ansible.builtin.stat
- module: ansible.builtin.template
- module: ansible.windows.win_file
attributes:
check_mode:
support: full
diff_mode:
details: permissions and ownership will be shown but file contents on absent/touch will not.
support: partial
platform:
platforms: posix
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Change file ownership, group and permissions
ansible.builtin.file:
path: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Give insecure permissions to an existing file
ansible.builtin.file:
path: /work
owner: root
group: root
mode: '1777'
- name: Create a symbolic link
ansible.builtin.file:
src: /file/to/link/to
dest: /path/to/symlink
owner: foo
group: foo
state: link
- name: Create two hard links
ansible.builtin.file:
src: '/tmp/{{ item.src }}'
dest: '{{ item.dest }}'
state: hard
loop:
- { src: x, dest: y }
- { src: z, dest: k }
- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u=rw,g=r,o=r
- name: Touch the same file, but add/remove some permissions
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
- name: Touch again the same file, but do not change times this makes the task idempotent
ansible.builtin.file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
modification_time: preserve
access_time: preserve
- name: Create a directory if it does not exist
ansible.builtin.file:
path: /etc/some_directory
state: directory
mode: '0755'
- name: Update modification and access time of given file
ansible.builtin.file:
path: /etc/some_file
state: file
modification_time: now
access_time: now
- name: Set access time based on seconds from epoch value
ansible.builtin.file:
path: /etc/another_file
state: file
access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
- name: Recursively change ownership of a directory
ansible.builtin.file:
path: /etc/foo
state: directory
recurse: yes
owner: foo
group: foo
- name: Remove file (delete file)
ansible.builtin.file:
path: /etc/foo.txt
state: absent
- name: Recursively remove directory
ansible.builtin.file:
path: /etc/foo
state: absent
'''
RETURN = r'''
dest:
description: Destination file/path, equal to the value passed to I(path).
returned: state=touch, state=hard, state=link
type: str
sample: /path/to/file.txt
path:
description: Destination file/path, equal to the value passed to I(path).
returned: state=absent, state=directory, state=file
type: str
sample: /path/to/file.txt
'''
import errno
import os
import shutil
import sys
import time
from pwd import getpwnam, getpwuid
from grp import getgrnam, getgrgid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# There will only be a single AnsibleModule object per module
module = None
class AnsibleModuleError(Exception):
def __init__(self, results):
self.results = results
def __repr__(self):
return 'AnsibleModuleError(results={0})'.format(self.results)
class ParameterError(AnsibleModuleError):
pass
class Sentinel(object):
def __new__(cls, *args, **kwargs):
return cls
def _ansible_excepthook(exc_type, exc_value, tb):
# Using an exception allows us to catch it if the calling code knows it can recover
if issubclass(exc_type, AnsibleModuleError):
module.fail_json(**exc_value.results)
else:
sys.__excepthook__(exc_type, exc_value, tb)
def additional_parameter_handling(params):
"""Additional parameter validation and reformatting"""
# When path is a directory, rewrite the pathname to be the file inside of the directory
# TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
# I think this is where we want to be in the future:
# when isdir(path):
# if state == absent: Remove the directory
# if state == touch: Touch the directory
# if state == directory: Assert the directory is the same as the one specified
# if state == file: place inside of the directory (use _original_basename)
# if state == link: place inside of the directory (use _original_basename. Fallback to src?)
# if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
basename = None
if params['_original_basename']:
basename = params['_original_basename']
elif params['src']:
basename = os.path.basename(params['src'])
if basename:
params['path'] = os.path.join(params['path'], basename)
# state should default to file, but since that creates many conflicts,
# default state to 'current' when it exists.
prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
if params['state'] is None:
if prev_state != 'absent':
params['state'] = prev_state
elif params['recurse']:
params['state'] = 'directory'
else:
params['state'] = 'file'
# make sure the target path is a directory when we're doing a recursive operation
if params['recurse'] and params['state'] != 'directory':
raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
"path": params["path"]})
# Fail if 'src' but no 'state' is specified
if params['src'] and params['state'] not in ('link', 'hard'):
raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
'path': params['path']})
def get_state(path):
''' Find out current state '''
b_path = to_bytes(path, errors='surrogate_or_strict')
try:
if os.path.lexists(b_path):
if os.path.islink(b_path):
return 'link'
elif os.path.isdir(b_path):
return 'directory'
elif os.stat(b_path).st_nlink > 1:
return 'hard'
# could be many other things, but defaulting to file
return 'file'
return 'absent'
except OSError as e:
if e.errno == errno.ENOENT: # It may already have been removed
return 'absent'
else:
raise
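# Illustrative examples (not part of the original source):
#   get_state('/etc')          -> 'directory'
#   get_state('/etc/hosts')    -> 'file' (or 'hard' when st_nlink > 1)
#   get_state('/no/such/path') -> 'absent'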
# This should be moved into the common file utilities
def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
changed = False
try:
for b_root, b_dirs, b_files in os.walk(b_path):
for b_fsobj in b_dirs + b_files:
b_fsname = os.path.join(b_root, b_fsobj)
if not os.path.islink(b_fsname):
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
else:
# Change perms on the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
if follow:
b_fsname = os.path.join(b_root, os.readlink(b_fsname))
# The link target could be nonexistent
if os.path.exists(b_fsname):
if os.path.isdir(b_fsname):
# Link is a directory so change perms on the directory's contents
changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
# Change perms on the file pointed to by the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
except RuntimeError as e:
# on Python3 "RecursionError" is raised which is derived from "RuntimeError"
# TODO once this function is moved into the common file utilities, this should probably raise more general exception
raise AnsibleModuleError(
results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
)
return changed
def initial_diff(path, state, prev_state):
diff = {'before': {'path': path},
'after': {'path': path},
}
if prev_state != state:
diff['before']['state'] = prev_state
diff['after']['state'] = state
if state == 'absent' and prev_state == 'directory':
walklist = {
'directories': [],
'files': [],
}
b_path = to_bytes(path, errors='surrogate_or_strict')
for base_path, sub_folders, files in os.walk(b_path):
for folder in sub_folders:
folderpath = os.path.join(base_path, folder)
walklist['directories'].append(folderpath)
for filename in files:
filepath = os.path.join(base_path, filename)
walklist['files'].append(filepath)
diff['before']['path_content'] = walklist
return diff
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
if formatted_time == 'preserve':
return None
elif formatted_time == 'now':
return Sentinel
else:
try:
struct = time.strptime(formatted_time, time_format)
struct_time = time.mktime(struct)
except (ValueError, OverflowError) as e:
raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
% (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
return struct_time
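# Illustrative behavior sketch (not part of the original source):
#   get_timestamp_for_time('preserve', fmt)                     -> None      (keep existing times)
#   get_timestamp_for_time('now', fmt)                          -> Sentinel  (use the current time)
#   get_timestamp_for_time('202201011200.00', '%Y%m%d%H%M.%S')  -> seconds since the epoch as a float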
def update_timestamp_for_file(path, mtime, atime, diff=None):
b_path = to_bytes(path, errors='surrogate_or_strict')
try:
# When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
# https://github.com/ansible/ansible/issues/50943
if mtime is Sentinel and atime is Sentinel:
# It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
# not be updated. Just use the current time for the diff values
mtime = atime = time.time()
previous_mtime = os.stat(b_path).st_mtime
previous_atime = os.stat(b_path).st_atime
set_time = None
else:
# If both parameters are None 'preserve', nothing to do
if mtime is None and atime is None:
return False
previous_mtime = os.stat(b_path).st_mtime
previous_atime = os.stat(b_path).st_atime
if mtime is None:
mtime = previous_mtime
elif mtime is Sentinel:
mtime = time.time()
if atime is None:
atime = previous_atime
elif atime is Sentinel:
atime = time.time()
# If both timestamps are already ok, nothing to do
if mtime == previous_mtime and atime == previous_atime:
return False
set_time = (atime, mtime)
if not module.check_mode:
os.utime(b_path, set_time)
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
if 'after' not in diff:
diff['after'] = {}
if mtime != previous_mtime:
diff['before']['mtime'] = previous_mtime
diff['after']['mtime'] = mtime
if atime != previous_atime:
diff['before']['atime'] = previous_atime
diff['after']['atime'] = atime
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
% to_native(e, nonstring='simplerepr'), 'path': path})
return True
def keep_backward_compatibility_on_timestamps(parameter, state):
if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
return 'preserve'
elif state == 'touch' and parameter is None:
return 'now'
else:
return parameter
def execute_diff_peek(path):
"""Take a guess as to whether a file is a binary file"""
b_path = to_bytes(path, errors='surrogate_or_strict')
appears_binary = False
try:
with open(b_path, 'rb') as f:
head = f.read(8192)
except Exception:
# If we can't read the file, we're okay assuming it's text
pass
else:
if b"\x00" in head:
appears_binary = True
return appears_binary
def ensure_absent(path):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
result = {}
if prev_state != 'absent':
diff = initial_diff(path, 'absent', prev_state)
if not module.check_mode:
if prev_state == 'directory':
try:
shutil.rmtree(b_path, ignore_errors=False)
except Exception as e:
raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
else:
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
'path': path})
result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
else:
result.update({'path': path, 'changed': False, 'state': 'absent'})
return result
def execute_touch(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
changed = False
result = {'dest': path}
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if not module.check_mode:
if prev_state == 'absent':
# Create an empty file if the filename did not already exist
try:
open(b_path, 'wb').close()
changed = True
except (OSError, IOError) as e:
raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
# Update the attributes on the file
diff = initial_diff(path, 'touch', prev_state)
file_args = module.load_file_common_arguments(module.params)
try:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except SystemExit as e:
if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
# We take this to mean that fail_json() was called from
# somewhere in basic.py
if prev_state == 'absent':
# If we just created the file we can safely remove it
os.remove(b_path)
raise
result['changed'] = changed
result['diff'] = diff
return result
def ensure_file_attributes(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if prev_state != 'file':
if follow and prev_state == 'link':
# follow symlink and operate on original
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
prev_state = get_state(b_path)
file_args['path'] = path
if prev_state not in ('file', 'hard'):
# file is not absent and any other state is a conflict
raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
'path': path, 'state': prev_state})
diff = initial_diff(path, 'file', prev_state)
changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# For followed symlinks, we need to operate on the target of the link
if follow and prev_state == 'link':
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
file_args['path'] = path
prev_state = get_state(b_path)
changed = False
diff = initial_diff(path, 'directory', prev_state)
if prev_state == 'absent':
# Create directory and assign permissions to it
if module.check_mode:
return {'path': path, 'changed': True, 'diff': diff}
curpath = ''
try:
# Split the path so we can apply filesystem attributes recursively
# from the root (/) directory for absolute paths or the base path
# of a relative path. We can then walk the appropriate directory
# path to apply attributes.
# Something like mkdir -p with mode applied to all of the newly created directories
for dirname in path.strip('/').split('/'):
curpath = '/'.join([curpath, dirname])
# Remove leading slash if we're creating a relative path
if not os.path.isabs(path):
curpath = curpath.lstrip('/')
b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
if not os.path.exists(b_curpath):
try:
os.mkdir(b_curpath)
changed = True
except OSError as ex:
# Possibly something else created the dir since the os.path.exists
# check above. As long as it's a dir, we don't need to error out.
if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
raise
tmp_file_args = file_args.copy()
tmp_file_args['path'] = curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except Exception as e:
raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
' %s' % (curpath, to_native(e)),
'path': path})
return {'path': path, 'changed': changed, 'diff': diff}
elif prev_state != 'directory':
# We already know prev_state is not 'absent', therefore it exists in some form.
raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
'path': path})
#
# previous state == directory
#
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
if recurse:
changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_symlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # src is either the target of a symlink or an informational passing of the
    # src for the template or copy modules; even if this module never uses it,
    # it is needed to key off some behaviors.
if src is None:
if follow:
# use the current target of the link as the source
src = to_native(os.readlink(b_path), errors='strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
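    # Resolve a relative src against the link's parent directory (or against
    # the path itself when it is an existing directory) so the existence
    # check below inspects the intended target.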
if not os.path.islink(b_path) and os.path.isdir(b_path):
relpath = path
else:
b_relpath = os.path.dirname(b_path)
relpath = to_native(b_relpath, errors='strict')
absrc = os.path.join(relpath, src)
b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
if not force and not os.path.exists(b_absrc):
raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
' really want to create the link: %s' % absrc,
'path': path, 'src': src})
if prev_state == 'directory':
if not force:
raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
% (prev_state, path),
'path': path})
elif os.listdir(b_path):
# refuse to replace a directory that has files in it
raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
' convert it' % path,
'path': path})
elif prev_state in ('file', 'hard') and not force:
raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
% (prev_state, path),
'path': path})
diff = initial_diff(path, 'link', prev_state)
changed = False
if prev_state in ('hard', 'file', 'directory', 'absent'):
changed = True
elif prev_state == 'link':
b_old_src = os.readlink(b_path)
if b_old_src != b_src:
diff['before']['src'] = to_native(b_old_src, errors='strict')
diff['after']['src'] = src
changed = True
else:
raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
b_tmppath = to_bytes(os.path.sep).join(
[os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
)
try:
if prev_state == 'directory':
os.rmdir(b_path)
os.symlink(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
else:
try:
os.symlink(b_src, b_path)
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
# Now that we might have created the symlink, get the arguments.
# We need to do it now so we can properly follow the symlink if needed
# because load_file_common_arguments sets 'path' according
# the value of follow and the symlink existence.
file_args = module.load_file_common_arguments(module.params)
# Whenever we create a link to a nonexistent target we know that the nonexistent target
# cannot have any permissions set on it. Skip setting those and emit a warning (the user
# can set follow=False to remove the warning)
if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
' set to False to avoid this.')
else:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def ensure_hardlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# src is the source of a hardlink. We require it if we are creating a new hardlink.
# We require path in the argument_spec so we know it is present at this point.
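    # NOTE: when state was omitted and inferred from an existing multi-link
    # file, src is typically still None, so this check is what surfaces as the
    # "src is required" failure discussed in issue #76142 below.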
if src is None:
raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
if not os.path.exists(b_src):
raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
diff = initial_diff(path, 'hard', prev_state)
changed = False
if prev_state == 'absent':
changed = True
elif prev_state == 'link':
b_old_src = os.readlink(b_path)
if b_old_src != b_src:
diff['before']['src'] = to_native(b_old_src, errors='strict')
diff['after']['src'] = src
changed = True
elif prev_state == 'hard':
if not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
changed = True
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
'dest': path, 'src': src})
elif prev_state == 'file':
changed = True
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
'dest': path, 'src': src})
elif prev_state == 'directory':
changed = True
if os.path.exists(b_path):
if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
return {'path': path, 'changed': False}
elif not force:
raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
'dest': path, 'src': src})
else:
raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
b_tmppath = to_bytes(os.path.sep).join(
[os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
)
try:
if prev_state == 'directory':
if os.path.exists(b_path):
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
raise
os.link(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
else:
try:
os.link(b_src, b_path)
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
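# The two helpers below let check_mode warn, rather than fail, when the
# requested owner/group does not exist yet (e.g. because an earlier task in
# the real play would have created it).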
def check_owner_exists(module, owner):
try:
uid = int(owner)
try:
getpwuid(uid).pw_name
except KeyError:
module.warn('failed to look up user with uid %s. Create user up to this point in real play' % uid)
except ValueError:
try:
getpwnam(owner).pw_uid
except KeyError:
module.warn('failed to look up user %s. Create user up to this point in real play' % owner)
def check_group_exists(module, group):
try:
gid = int(group)
try:
getgrgid(gid).gr_name
except KeyError:
module.warn('failed to look up group with gid %s. Create group up to this point in real play' % gid)
except ValueError:
try:
getgrnam(group).gr_gid
except KeyError:
module.warn('failed to look up group %s. Create group up to this point in real play' % group)
def main():
global module
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
path=dict(type='path', required=True, aliases=['dest', 'name']),
_original_basename=dict(type='str'), # Internal use only, for recursive ops
recurse=dict(type='bool', default=False),
force=dict(type='bool', default=False), # Note: Should not be in file_common_args in future
follow=dict(type='bool', default=True), # Note: Different default than file_common_args
_diff_peek=dict(type='bool'), # Internal use only, for internal checks in the action plugins
src=dict(type='path'), # Note: Should not be in file_common_args in future
modification_time=dict(type='str'),
modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
access_time=dict(type='str'),
access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
),
add_file_common_args=True,
supports_check_mode=True,
)
# When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
sys.excepthook = _ansible_excepthook
additional_parameter_handling(module.params)
params = module.params
state = params['state']
recurse = params['recurse']
force = params['force']
follow = params['follow']
path = params['path']
src = params['src']
if module.check_mode and state != 'absent':
file_args = module.load_file_common_arguments(module.params)
if file_args['owner']:
check_owner_exists(module, file_args['owner'])
if file_args['group']:
check_group_exists(module, file_args['group'])
timestamps = {}
timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
timestamps['modification_time_format'] = params['modification_time_format']
timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
timestamps['access_time_format'] = params['access_time_format']
# short-circuit for diff_peek
if params['_diff_peek'] is not None:
appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
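    # Dispatch on state. When state was omitted, additional_parameter_handling()
    # above has already filled it in (inferred from the existing path when possible).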
if state == 'file':
result = ensure_file_attributes(path, follow, timestamps)
elif state == 'directory':
result = ensure_directory(path, follow, recurse, timestamps)
elif state == 'link':
result = ensure_symlink(path, src, follow, force, timestamps)
elif state == 'hard':
result = ensure_hardlink(path, src, follow, force, timestamps)
elif state == 'touch':
result = execute_touch(path, follow, timestamps)
elif state == 'absent':
result = ensure_absent(path)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,142 |
ansible.builtin.file fails to change permissions on hard links unless you explicitly define 'state: file'
|
### Summary
When using the file module to iterate over a list of files, some of which could be hardlinks, the file module fails with "src is required for creating new hardlinks". However, I am simply trying to modify the permissions of the files, not create new ones.
For example, I gather the list of files like so (keep in mind some of these are regular files, some of them are hardlinks):
```
- name: Finding all *.crt files in the pki directory
find:
paths: "{{ item }}"
patterns: '*.crt'
loop: "{{ crt_directories }}"
register: crt_find
```
So at this point, `crt_find` is a list of files and hard links.
Then, I iterate over `crt_find` to set the permissions on all of the files and hardlinks using the file module:
```
- name: Setting certificate file Permissions
file:
path: "{{ item.1.path }}"
mode: 0644
loop: "{{ crt_find.results | subelements('files') }}"
```
This works perfectly fine for regular files. When it gets to a hardlink, it fails with "src is required for creating new hardlinks." The problem is, I'm not trying to create a new hardlink. I'm simply trying to modify the permissions.
Interestingly, this goes away if I explicitly define `state: file` as a parameter on the file module task:
```
- name: Setting certificate file Permissions
file:
path: "{{ item.1.path }}"
state: file # <---------------This fixes the problem
mode: 0644
loop: "{{ crt_find.results | subelements('files') }}"
```
While I've clearly found a solution to my problem, it definitely seems like a bug with the file module, as `state: file` is the default if omitted.
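A minimal sketch of the likely mechanism, reusing the helper names from `file.py` (the bodies here are simplified assumptions, not the module's exact code):
```python
import os

def get_state(b_path):
    # Assumed simplification: an existing regular file with more than one
    # link is reported as 'hard'.
    if os.path.islink(b_path):
        return 'link'
    if os.path.isdir(b_path):
        return 'directory'
    if os.path.exists(b_path):
        return 'hard' if os.stat(b_path).st_nlink > 1 else 'file'
    return 'absent'

# When state is omitted, the module infers it from the existing path, so a
# multi-link file is routed to ensure_hardlink(), which requires src and
# raises "src is required for creating new hardlinks". An explicit
# state: file skips that inference and runs ensure_file_attributes() instead.
```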
### Issue Type
Bug Report
### Component Name
ansible.builtin.file
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /Users/me/.ansible.cfg
configured module search path = ['/Users/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/me/venv/lib/python3.8/site-packages/ansible
ansible collection location = /Users/me/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/me/venv/bin/ansible
python version = 3.8.2 (default, Jun 8 2021, 11:59:35) [Clang 12.0.5 (clang-1205.0.22.11)]
jinja version = 3.0.1
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_ROLES_PATH(/Users/me/.ansible.cfg) = ['/Users/me/repos']
DEFAULT_STDOUT_CALLBACK(env: ANSIBLE_STDOUT_CALLBACK) = yaml
HOST_KEY_CHECKING(/Users/me/.ansible.cfg) = False
```
### OS / Environment
Control machine = MacOS
Target machine = Ubuntu 20
### Steps to Reproduce
Stated in the Summary
### Expected Results
Stated in the Summary
### Actual Results
```console
Stated in the Summary
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76142
|
https://github.com/ansible/ansible/pull/76167
|
69c7e3f4a3e244b28eef919ff2c1356ea6d06b1c
|
3f1838bf918a055b491ab25d6caee2e5022db3c1
| 2021-10-26T14:18:11Z |
python
| 2022-08-10T20:01:31Z |
test/integration/targets/file/tasks/main.yml
|
# Test code for the file module.
# (c) 2014, Richard Isaacson <[email protected]>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
- set_fact:
remote_tmp_dir_test: '{{ remote_tmp_dir }}/file'
- set_fact:
output_file: '{{remote_tmp_dir_test}}/foo.txt'
# same as expanduser & expandvars called on managed host
- command: 'echo {{ output_file }}'
register: echo
- set_fact:
remote_file_expanded: '{{ echo.stdout }}'
# Import the test tasks
- name: Run tests for state=link
import_tasks: state_link.yml
- name: Run tests for directory as dest
import_tasks: directory_as_dest.yml
- name: Run tests for unicode
import_tasks: unicode_path.yml
environment:
LC_ALL: C
LANG: C
- name: decide to include or not include selinux tests
include_tasks: selinux_tests.yml
when: selinux_installed is defined and selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"
- name: Initialize the test output dir
import_tasks: initialize.yml
- name: Test _diff_peek
import_tasks: diff_peek.yml
- name: Test modification time
import_tasks: modification_time.yml
# These tests need to be organized by state parameter into separate files later
- name: verify that we are checking a file and it is present
file: path={{output_file}} state=file
register: file_result
- name: verify that the file was not marked as changed
assert:
that:
- "file_result.changed == false"
- "file_result.state == 'file'"
- name: Make sure file does not exist
file:
path: /tmp/ghost
state: absent
- name: Target a file that does not exist
file:
path: /tmp/ghost
ignore_errors: yes
register: ghost_file_result
- name: Validate ghost file results
assert:
that:
- ghost_file_result is failed
- ghost_file_result is not changed
- ghost_file_result.state == 'absent'
- "'cannot continue' in ghost_file_result.msg"
- name: verify that we are checking an absent file
file: path={{remote_tmp_dir_test}}/bar.txt state=absent
register: file2_result
- name: verify that the file was not marked as changed
assert:
that:
- "file2_result.changed == false"
- "file2_result.state == 'absent'"
- name: verify we can touch a file
file:
path: "{{remote_tmp_dir_test}}/baz.txt"
state: touch
mode: '0644'
register: file3_result
- name: verify that the file was marked as changed
assert:
that:
- "file3_result.changed == true"
- "file3_result.state == 'file'"
- "file3_result.mode == '0644'"
- name: change file mode
file: path={{remote_tmp_dir_test}}/baz.txt mode=0600
register: file4_result
- name: verify that the file was marked as changed
assert:
that:
- "file4_result.changed == true"
- "file4_result.mode == '0600'"
- name: define file to verify chattr/lsattr with
set_fact:
attributes_file: "{{ remote_tmp_dir_test }}/attributes.txt"
attributes_supported: no
- name: create file to verify chattr/lsattr with
command: touch "{{ attributes_file }}"
- name: add "A" attribute to file
command: chattr +A "{{ attributes_file }}"
ignore_errors: yes
- name: get attributes from file
command: lsattr -d "{{ attributes_file }}"
register: attribute_A_set
ignore_errors: yes
- name: remove "A" attribute from file
command: chattr -A "{{ attributes_file }}"
ignore_errors: yes
- name: get attributes from file
command: lsattr -d "{{ attributes_file }}"
register: attribute_A_unset
ignore_errors: yes
- name: determine if chattr/lsattr is supported
set_fact:
attributes_supported: yes
when:
- attribute_A_set is success
- "'A' in attribute_A_set.stdout_lines[0].split()[0]"
- attribute_A_unset is success
- "'A' not in attribute_A_unset.stdout_lines[0].split()[0]"
- name: explicitly set file attribute "A"
file: path={{remote_tmp_dir_test}}/baz.txt attributes=A
register: file_attributes_result
ignore_errors: True
when: attributes_supported
- name: add file attribute "A"
file: path={{remote_tmp_dir_test}}/baz.txt attributes=+A
register: file_attributes_result_2
when: file_attributes_result is changed
- name: verify that the file was not marked as changed
assert:
that:
- "file_attributes_result_2 is not changed"
when: file_attributes_result is changed
- name: remove file attribute "A"
file: path={{remote_tmp_dir_test}}/baz.txt attributes=-A
register: file_attributes_result_3
ignore_errors: True
- name: explicitly remove file attributes
file: path={{remote_tmp_dir_test}}/baz.txt attributes=""
register: file_attributes_result_4
when: file_attributes_result_3 is changed
- name: verify that the file was not marked as changed
assert:
that:
- "file_attributes_result_4 is not changed"
  when: file_attributes_result_3 is changed
- name: create user
user:
name: test1
uid: 1234
notify: remove users
- name: create group
group:
name: test1
gid: 1234
notify: remove groups
- name: change ownership and group
file: path={{remote_tmp_dir_test}}/baz.txt owner=1234 group=1234
- name: Get stat info to check atime later
stat: path={{remote_tmp_dir_test}}/baz.txt
register: file_attributes_result_5_before
- name: updates access time
file: path={{remote_tmp_dir_test}}/baz.txt access_time=now
register: file_attributes_result_5
- name: Get stat info to check atime later
stat: path={{remote_tmp_dir_test}}/baz.txt
register: file_attributes_result_5_after
- name: verify that the file was marked as changed and atime changed
assert:
that:
- "file_attributes_result_5 is changed"
- "file_attributes_result_5_after['stat']['atime'] != file_attributes_result_5_before['stat']['atime']"
- name: setup a tmp-like directory for ownership test
file: path=/tmp/worldwritable mode=1777 state=directory
- name: Ask to create a file without enough perms to change ownership
file: path=/tmp/worldwritable/baz.txt state=touch owner=root
become: yes
become_user: nobody
register: chown_result
ignore_errors: True
- name: Ask whether the new file exists
stat: path=/tmp/worldwritable/baz.txt
register: file_exists_result
- name: Verify that the file doesn't exist on failure
assert:
that:
- "chown_result.failed == True"
- "file_exists_result.stat.exists == False"
- name: clean up
file: path=/tmp/worldwritable state=absent
- name: create hard link to file
file: src={{output_file}} dest={{remote_tmp_dir_test}}/hard.txt state=hard
register: file6_result
- name: verify that the file was marked as changed
assert:
that:
- "file6_result.changed == true"
- name: touch a hard link
file:
dest: '{{ remote_tmp_dir_test }}/hard.txt'
state: 'touch'
register: file6_touch_result
- name: verify that the hard link was touched
assert:
that:
- "file6_touch_result.changed == true"
- name: stat1
stat: path={{output_file}}
register: hlstat1
- name: stat2
stat: path={{remote_tmp_dir_test}}/hard.txt
register: hlstat2
- name: verify that hard link is still the same after timestamp updated
assert:
that:
- "hlstat1.stat.inode == hlstat2.stat.inode"
- name: create hard link to file 2
file: src={{output_file}} dest={{remote_tmp_dir_test}}/hard.txt state=hard
register: hlink_result
- name: verify that hard link creation is idempotent
assert:
that:
- "hlink_result.changed == False"
- name: Change mode on a hard link
file: src={{output_file}} dest={{remote_tmp_dir_test}}/hard.txt mode=0701
register: file6_mode_change
- name: verify that the mode change was applied
  assert:
    that:
      - "file6_mode_change.changed == true"
- name: stat1
stat: path={{output_file}}
register: hlstat1
- name: stat2
stat: path={{remote_tmp_dir_test}}/hard.txt
register: hlstat2
- name: verify that hard link is still the same after timestamp updated
assert:
that:
- "hlstat1.stat.inode == hlstat2.stat.inode"
- "hlstat1.stat.mode == '0701'"
- name: create a directory
file: path={{remote_tmp_dir_test}}/foobar state=directory
register: file7_result
- name: verify that the file was marked as changed
assert:
that:
- "file7_result.changed == true"
- "file7_result.state == 'directory'"
- name: determine if selinux is installed
shell: which getenforce || exit 0
register: selinux_installed
- name: determine if selinux is enabled
shell: getenforce
register: selinux_enabled
when: selinux_installed.stdout != ""
ignore_errors: true
- name: remove directory foobar
file: path={{remote_tmp_dir_test}}/foobar state=absent
- name: remove file foo.txt
file: path={{remote_tmp_dir_test}}/foo.txt state=absent
- name: remove file bar.txt
  file: path={{remote_tmp_dir_test}}/bar.txt state=absent
- name: remove file baz.txt
  file: path={{remote_tmp_dir_test}}/baz.txt state=absent
- name: copy directory structure over
copy: src=foobar dest={{remote_tmp_dir_test}}
- name: check what would be removed if folder state was absent and diff is enabled
file:
path: "{{ item }}"
state: absent
check_mode: yes
diff: yes
with_items:
- "{{ remote_tmp_dir_test }}"
- "{{ remote_tmp_dir_test }}/foobar/fileA"
register: folder_absent_result
- name: 'assert that the "absent" state diff lists expected files and folders, but only for directories'
assert:
that:
- folder_absent_result.results[0].diff.before.path_content is defined
- folder_absent_result.results[1].diff.before.path_content is not defined
- test_folder in folder_absent_result.results[0].diff.before.path_content.directories
- test_file in folder_absent_result.results[0].diff.before.path_content.files
vars:
test_folder: "{{ folder_absent_result.results[0].path }}/foobar"
test_file: "{{ folder_absent_result.results[0].path }}/foobar/fileA"
- name: Change ownership of a directory with recurse=no(default)
file: path={{remote_tmp_dir_test}}/foobar owner=1234
- name: verify that the permission of the directory was set
file: path={{remote_tmp_dir_test}}/foobar state=directory
register: file8_result
- name: assert that the directory has changed to have owner 1234
assert:
that:
- "file8_result.uid == 1234"
- name: verify that the permission of a file under the directory was not set
file: path={{remote_tmp_dir_test}}/foobar/fileA state=file
register: file9_result
- name: assert the file owner has not changed to 1234
assert:
that:
- "file9_result.uid != 1234"
- name: create user
user:
name: test2
uid: 1235
- name: change the ownership of a directory with recurse=yes
file: path={{remote_tmp_dir_test}}/foobar owner=1235 recurse=yes
- name: verify that the permission of the directory was set
file: path={{remote_tmp_dir_test}}/foobar state=directory
register: file10_result
- name: assert that the directory has changed to have owner 1235
assert:
that:
- "file10_result.uid == 1235"
- name: verify that the permission of a file under the directory was not set
file: path={{remote_tmp_dir_test}}/foobar/fileA state=file
register: file11_result
- name: assert that the file has changed to have owner 1235
assert:
that:
- "file11_result.uid == 1235"
- name: remove directory foobar
file: path={{remote_tmp_dir_test}}/foobar state=absent
register: file14_result
- name: verify that the directory was removed
assert:
that:
- 'file14_result.changed == true'
- 'file14_result.state == "absent"'
- name: create a test sub-directory
file: dest={{remote_tmp_dir_test}}/sub1 state=directory
register: file15_result
- name: verify that the new directory was created
assert:
that:
- 'file15_result.changed == true'
- 'file15_result.state == "directory"'
- name: create test files in the sub-directory
file: dest={{remote_tmp_dir_test}}/sub1/{{item}} state=touch
with_items:
- file1
- file2
- file3
register: file16_result
- name: verify the files were created
assert:
that:
- 'item.changed == true'
- 'item.state == "file"'
with_items: "{{file16_result.results}}"
- name: test file creation with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx
register: result
- name: assert file mode
assert:
that:
- result.mode == '0777'
- name: modify symbolic mode for all
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=a=r
register: result
- name: assert file mode
assert:
that:
- result.mode == '0444'
- name: modify symbolic mode for owner
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=u+w
register: result
- name: assert file mode
assert:
that:
- result.mode == '0644'
- name: modify symbolic mode for group
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=g+w
register: result
- name: assert file mode
assert:
that:
- result.mode == '0664'
- name: modify symbolic mode for world
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=o+w
register: result
- name: assert file mode
assert:
that:
- result.mode == '0666'
- name: modify symbolic mode for owner
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=u+x
register: result
- name: assert file mode
assert:
that:
- result.mode == '0766'
- name: modify symbolic mode for group
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=g+x
register: result
- name: assert file mode
assert:
that:
- result.mode == '0776'
- name: modify symbolic mode for world
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=o+x
register: result
- name: assert file mode
assert:
that:
- result.mode == '0777'
- name: remove symbolic mode for world
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=o-wx
register: result
- name: assert file mode
assert:
that:
- result.mode == '0774'
- name: remove symbolic mode for group
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=g-wx
register: result
- name: assert file mode
assert:
that:
- result.mode == '0744'
- name: remove symbolic mode for owner
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=u-wx
register: result
- name: assert file mode
assert:
that:
- result.mode == '0444'
- name: set sticky bit with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=o+t
register: result
- name: assert file mode
assert:
that:
- result.mode == '01444'
- name: remove sticky bit with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=o-t
register: result
- name: assert file mode
assert:
that:
- result.mode == '0444'
- name: add setgid with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=g+s
register: result
- name: assert file mode
assert:
that:
- result.mode == '02444'
- name: remove setgid with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=g-s
register: result
- name: assert file mode
assert:
that:
- result.mode == '0444'
- name: add setuid with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=u+s
register: result
- name: assert file mode
assert:
that:
- result.mode == '04444'
- name: remove setuid with symbolic mode
file: dest={{remote_tmp_dir_test}}/test_symbolic state=touch mode=u-s
register: result
- name: assert file mode
assert:
that:
- result.mode == '0444'
# https://github.com/ansible/ansible/issues/67307
# Test that the module fails in check_mode when the directory and owner/group do not exist
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: owner does not exist in check_mode
file:
path: '/tmp/nonexistent'
owner: nonexistent
check_mode: yes
register: owner_no_exist
ignore_errors: yes
- name: create owner
user:
name: nonexistent
notify: remove users
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: owner exist in check_mode
file:
path: '/tmp/nonexistent'
owner: nonexistent
check_mode: yes
register: owner_exists
ignore_errors: yes
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: owner does not exist in check_mode, using uid
file:
path: '/tmp/nonexistent'
owner: '111111'
check_mode: yes
ignore_errors: yes
register: owner_uid_no_exist
- name: create owner using uid
user:
name: test_uid
uid: 111111
notify: remove users
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: owner exists in check_mode, using uid
file:
path: '/tmp/nonexistent'
owner: '111111'
state: touch
check_mode: yes
ignore_errors: yes
register: owner_uid_exists
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: group does not exist in check_mode
file:
path: '/tmp/nonexistent'
group: nonexistent1
check_mode: yes
register: group_no_exist
ignore_errors: yes
- name: create group
group:
name: nonexistent1
notify: remove groups
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: group exists in check_mode
file:
path: '/tmp/nonexistent'
group: nonexistent1
check_mode: yes
register: group_exists
ignore_errors: yes
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: group does not exist in check_mode, using gid
file:
path: '/tmp/nonexistent'
group: '111112'
check_mode: yes
register: group_gid_no_exist
ignore_errors: yes
- name: create group with gid
group:
name: test_gid
gid: 111112
notify: remove groups
# state=touch is intentionally omitted here so the task fails and we can catch the warnings
- name: group exists in check_mode, using gid
file:
path: '/tmp/nonexistent'
group: '111112'
check_mode: yes
register: group_gid_exists
ignore_errors: yes
- assert:
that:
- owner_no_exist.warnings[0] is search('failed to look up user')
- owner_uid_no_exist.warnings[0] is search('failed to look up user with uid')
- group_no_exist.warnings[0] is search('failed to look up group')
- group_gid_no_exist.warnings[0] is search('failed to look up group with gid')
- owner_exists.warnings is not defined
- owner_uid_exists.warnings is not defined
- group_exists.warnings is not defined
- group_gid_exists.warnings is not defined
# https://github.com/ansible/ansible/issues/50943
# Need to use /tmp as the 'nobody' user can't access remote_tmp_dir_test at all
- name: create file as root with all write permissions
file: dest=/tmp/write_utime state=touch mode=0666 owner={{ansible_user_id}}
- name: Pause to ensure stat times are not the exact same
pause:
seconds: 1
- block:
- name: get previous time
stat: path=/tmp/write_utime
register: previous_time
- name: pause for 1 second to ensure the next touch is newer
pause: seconds=1
- name: touch file as nobody
file: dest=/tmp/write_utime state=touch
become: True
become_user: nobody
register: result
- name: get new time
stat: path=/tmp/write_utime
register: current_time
always:
- name: remove test utime file
file: path=/tmp/write_utime state=absent
- name: assert touch file as nobody
assert:
that:
- result is changed
- current_time.stat.atime > previous_time.stat.atime
- current_time.stat.mtime > previous_time.stat.mtime
# Follow + recursive tests
- name: create a toplevel directory
file: path={{remote_tmp_dir_test}}/test_follow_rec state=directory mode=0755
- name: create a file outside of the toplevel
file: path={{remote_tmp_dir_test}}/test_follow_rec_target_file state=touch mode=0700
- name: create a directory outside of the toplevel
file: path={{remote_tmp_dir_test}}/test_follow_rec_target_dir state=directory mode=0700
- name: create a file inside of the link target directory
file: path={{remote_tmp_dir_test}}/test_follow_rec_target_dir/foo state=touch mode=0700
- name: create a symlink to the file
file: path={{remote_tmp_dir_test}}/test_follow_rec/test_link state=link src="../test_follow_rec_target_file"
- name: create a symlink to the directory
file: path={{remote_tmp_dir_test}}/test_follow_rec/test_link_dir state=link src="../test_follow_rec_target_dir"
- name: create a symlink to a nonexistent file
file: path={{remote_tmp_dir_test}}/test_follow_rec/nonexistent state=link src=does_not_exist force=True
- name: try to change permissions without following symlinks
file: path={{remote_tmp_dir_test}}/test_follow_rec follow=False mode="a-x" recurse=True
- name: stat the link file target
stat: path={{remote_tmp_dir_test}}/test_follow_rec_target_file
register: file_result
- name: stat the link dir target
stat: path={{remote_tmp_dir_test}}/test_follow_rec_target_dir
register: dir_result
- name: stat the file inside the link dir target
stat: path={{remote_tmp_dir_test}}/test_follow_rec_target_dir/foo
register: file_in_dir_result
- name: assert that the link targets were unmodified
assert:
that:
- file_result.stat.mode == '0700'
- dir_result.stat.mode == '0700'
- file_in_dir_result.stat.mode == '0700'
- name: try to change permissions with following symlinks
file: path={{remote_tmp_dir_test}}/test_follow_rec follow=True mode="a-x" recurse=True
- name: stat the link file target
stat: path={{remote_tmp_dir_test}}/test_follow_rec_target_file
register: file_result
- name: stat the link dir target
stat: path={{remote_tmp_dir_test}}/test_follow_rec_target_dir
register: dir_result
- name: stat the file inside the link dir target
stat: path={{remote_tmp_dir_test}}/test_follow_rec_target_dir/foo
register: file_in_dir_result
- name: assert that the link targets were modified
assert:
that:
- file_result.stat.mode == '0600'
- dir_result.stat.mode == '0600'
- file_in_dir_result.stat.mode == '0600'
# https://github.com/ansible/ansible/issues/55971
- name: Test missing src and path
file:
state: hard
register: file_error1
ignore_errors: yes
- assert:
that:
- "file_error1 is failed"
- "file_error1.msg == 'missing required arguments: path'"
- name: Test missing src
file:
dest: "{{ remote_tmp_dir_test }}/hard.txt"
state: hard
register: file_error2
ignore_errors: yes
- assert:
that:
- "file_error2 is failed"
- "file_error2.msg == 'src is required for creating new hardlinks'"
- name: Test non-existing src
file:
src: non-existing-file-that-does-not-exist.txt
dest: "{{ remote_tmp_dir_test }}/hard.txt"
state: hard
register: file_error3
ignore_errors: yes
- assert:
that:
- "file_error3 is failed"
- "file_error3.msg == 'src does not exist'"
- "file_error3.dest == '{{ remote_tmp_dir_test }}/hard.txt' | expanduser"
- "file_error3.src == 'non-existing-file-that-does-not-exist.txt'"
- block:
- name: Create a testing file
file:
dest: original_file.txt
state: touch
- name: Test relative path with state=hard
file:
src: original_file.txt
dest: hard_link_file.txt
state: hard
register: hard_link_relpath
- name: Just check if it was successful, we don't care about the actual hard link in this test
assert:
that:
- "hard_link_relpath is success"
always:
- name: Clean up
file:
path: "{{ item }}"
state: absent
loop:
- original_file.txt
- hard_link_file.txt
# END #55971
|
test/integration/targets/file/tasks/state_link.yml
|
# file module tests for dealing with symlinks (state=link)
- name: Initialize the test output dir
import_tasks: initialize.yml
#
# Basic absolute symlink to a file
#
- name: create soft link to file
file: src={{output_file}} dest={{remote_tmp_dir_test}}/soft.txt state=link
register: file1_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/soft.txt'
follow: False
register: file1_link_stat
- name: verify that the symlink was created correctly
assert:
that:
- 'file1_result is changed'
- 'file1_link_stat["stat"].islnk'
- 'file1_link_stat["stat"].lnk_target | expanduser == output_file | expanduser'
#
# Change an absolute soft link into a relative soft link
#
- name: change soft link to relative
file: src={{output_file|basename}} dest={{remote_tmp_dir_test}}/soft.txt state=link
register: file2_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/soft.txt'
follow: False
register: file2_link_stat
- name: verify that the file was marked as changed
assert:
that:
- "file2_result is changed"
- "file2_result.diff.before.src == remote_file_expanded"
- "file2_result.diff.after.src == remote_file_expanded|basename"
- "file2_link_stat['stat'].islnk"
- "file2_link_stat['stat'].lnk_target == remote_file_expanded | basename"
#
# Check that creating the soft link a second time was idempotent
#
- name: soft link idempotency check
file: src={{output_file|basename}} dest={{remote_tmp_dir_test}}/soft.txt state=link
register: file3_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/soft.txt'
follow: False
register: file3_link_stat
- name: verify that the file was not marked as changed
assert:
that:
- "not file3_result is changed"
- "file3_link_stat['stat'].islnk"
- "file3_link_stat['stat'].lnk_target == remote_file_expanded | basename"
#
# Test symlink to nonexistent files
#
- name: fail to create soft link to nonexistent file
file:
src: '/nonexistent'
dest: '{{remote_tmp_dir_test}}/soft2.txt'
state: 'link'
force: False
register: file4_result
ignore_errors: true
- name: verify that link was not created
assert:
that:
- "file4_result is failed"
- name: force creation of soft link to nonexistent file
file:
src: '/nonexistent'
dest: '{{ remote_tmp_dir_test}}/soft2.txt'
state: 'link'
force: True
register: file5_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/soft2.txt'
follow: False
register: file5_link_stat
- name: verify that link was created
assert:
that:
- "file5_result is changed"
- "file5_link_stat['stat'].islnk"
- "file5_link_stat['stat'].lnk_target == '/nonexistent'"
- name: Prove idempotence of forced soft link to nonexistent file
file:
src: '/nonexistent'
dest: '{{ remote_tmp_dir_test }}/soft2.txt'
state: 'link'
force: True
register: file6a_result
- name: verify that the link to nonexistent is idempotent
assert:
that:
- "file6a_result.changed == false"
# For a symlink in a sticky, world-writable directory to be followed, it must
# either be owned by the follower, or the directory and the symlink must have
# the same owner.
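# (This mirrors the kernel's fs.protected_symlinks restriction.)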
- name: symlink in sticky directory
block:
- name: Create remote unprivileged remote user
user:
name: '{{ remote_unprivileged_user }}'
register: user
notify: remove users
- name: Create a local temporary directory
tempfile:
state: directory
register: tempdir
- name: Set sticky bit
file:
path: '{{ tempdir.path }}'
mode: o=rwXt
- name: 'Check mode: force creation soft link in sticky directory owned by another user (mode is used)'
file:
src: '{{ user.home }}/nonexistent'
dest: '{{ tempdir.path }}/soft3.txt'
mode: 0640
state: 'link'
owner: '{{ remote_unprivileged_user }}'
force: true
follow: false
check_mode: true
register: missing_dst_no_follow_enable_force_use_mode1
- name: force creation soft link in sticky directory owned by another user (mode is used)
file:
src: '{{ user.home }}/nonexistent'
dest: '{{ tempdir.path }}/soft3.txt'
mode: 0640
state: 'link'
owner: '{{ remote_unprivileged_user }}'
force: true
follow: false
register: missing_dst_no_follow_enable_force_use_mode2
- name: Get stat info for the link
stat:
path: '{{ tempdir.path }}/soft3.txt'
follow: false
register: soft3_result
- name: 'Idempotence: force creation soft link in sticky directory owned by another user (mode is used)'
file:
src: '{{ user.home }}/nonexistent'
dest: '{{ tempdir.path }}/soft3.txt'
mode: 0640
state: 'link'
owner: '{{ remote_unprivileged_user }}'
force: yes
follow: false
register: missing_dst_no_follow_enable_force_use_mode3
always:
- name: Delete remote unprivileged remote user
user:
name: '{{ remote_unprivileged_user }}'
state: absent
force: yes
remove: yes
- name: Delete unprivileged user home and tempdir
file:
path: "{{ item }}"
state: absent
loop:
- '{{ tempdir.path }}'
- '{{ user.home }}'
- name: verify that link was created
assert:
that:
- "missing_dst_no_follow_enable_force_use_mode1 is changed"
- "missing_dst_no_follow_enable_force_use_mode2 is changed"
- "missing_dst_no_follow_enable_force_use_mode3 is not changed"
- "soft3_result['stat'].islnk"
- "soft3_result['stat'].lnk_target == '{{ user.home }}/nonexistent'"
#
# Test creating a link to a directory https://github.com/ansible/ansible/issues/1369
#
- name: create soft link to directory using absolute path
file:
src: '/'
dest: '{{ remote_tmp_dir_test }}/root'
state: 'link'
register: file6_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/root'
follow: False
register: file6_link_stat
- name: Get stat info for the pointed to file
stat:
path: '{{ remote_tmp_dir_test }}/root'
follow: True
register: file6_links_dest_stat
- name: Get stat info for the file we intend to point to
stat:
path: '/'
follow: False
register: file6_dest_stat
- name: verify that the link was created correctly
assert:
that:
# file command reports it created something
- "file6_result.changed == true"
# file command created a link
- 'file6_link_stat["stat"]["islnk"]'
# Link points to the right path
- 'file6_link_stat["stat"]["lnk_target"] == "/"'
# The link target and the file we intended to link to have the same inode
- 'file6_links_dest_stat["stat"]["inode"] == file6_dest_stat["stat"]["inode"]'
#
# Test creating a relative link
#
# Relative link to file
- name: create a test sub-directory to link to
file:
dest: '{{ remote_tmp_dir_test }}/sub1'
state: 'directory'
- name: create a file to link to in the test sub-directory
file:
dest: '{{ remote_tmp_dir_test }}/sub1/file1'
state: 'touch'
- name: create another test sub-directory to place links within
file:
dest: '{{remote_tmp_dir_test}}/sub2'
state: 'directory'
- name: create soft link to relative file
file:
src: '../sub1/file1'
dest: '{{ remote_tmp_dir_test }}/sub2/link1'
state: 'link'
register: file7_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/sub2/link1'
follow: False
register: file7_link_stat
- name: Get stat info for the pointed to file
stat:
path: '{{ remote_tmp_dir_test }}/sub2/link1'
follow: True
register: file7_links_dest_stat
- name: Get stat info for the file we intend to point to
stat:
path: '{{ remote_tmp_dir_test }}/sub1/file1'
follow: False
register: file7_dest_stat
- name: verify that the link was created correctly
assert:
that:
# file command reports it created something
- "file7_result.changed == true"
# file command created a link
- 'file7_link_stat["stat"]["islnk"]'
# Link points to the right path
- 'file7_link_stat["stat"]["lnk_target"] == "../sub1/file1"'
# The link target and the file we intended to link to have the same inode
- 'file7_links_dest_stat["stat"]["inode"] == file7_dest_stat["stat"]["inode"]'
# Relative link to directory
- name: create soft link to relative directory
file:
src: sub1
dest: '{{ remote_tmp_dir_test }}/sub1-link'
state: 'link'
register: file8_result
- name: Get stat info for the link
stat:
path: '{{ remote_tmp_dir_test }}/sub1-link'
follow: False
register: file8_link_stat
- name: Get stat info for the pointed to file
stat:
path: '{{ remote_tmp_dir_test }}/sub1-link'
follow: True
register: file8_links_dest_stat
- name: Get stat info for the file we intend to point to
stat:
path: '{{ remote_tmp_dir_test }}/sub1'
follow: False
register: file8_dest_stat
- name: verify that the link was created correctly
assert:
that:
# file command reports it created something
- "file8_result.changed == true"
# file command created a link
- 'file8_link_stat["stat"]["islnk"]'
# Link points to the right path
- 'file8_link_stat["stat"]["lnk_target"] == "sub1"'
# The link target and the file we intended to link to have the same inode
- 'file8_links_dest_stat["stat"]["inode"] == file8_dest_stat["stat"]["inode"]'
# test the file module using follow=yes, so that the target of a
# symlink is modified, rather than the link itself
- name: create a test file
copy:
dest: '{{remote_tmp_dir_test}}/test_follow'
content: 'this is a test file\n'
mode: 0666
- name: create a symlink to the test file
file:
path: '{{remote_tmp_dir_test}}/test_follow_link'
src: './test_follow'
state: 'link'
- name: modify the permissions on the link using follow=yes
file:
path: '{{remote_tmp_dir_test}}/test_follow_link'
mode: 0644
follow: yes
register: file9_result
- name: stat the link target
stat:
path: '{{remote_tmp_dir_test}}/test_follow'
register: file9_stat
- name: assert that the chmod worked
assert:
that:
- 'file9_result is changed'
- 'file9_stat["stat"]["mode"] == "0644"'
#
# Test modifying the permissions of a link itself
#
- name: attempt to modify the permissions of the link itself
file:
path: '{{remote_tmp_dir_test}}/test_follow_link'
src: './test_follow'
state: 'link'
mode: 0600
follow: False
register: file10_result
# Whether the link itself changed is platform dependent! (BSD vs Linux?)
# Just check that the underlying file was not changed
- name: stat the link target
stat:
path: '{{remote_tmp_dir_test}}/test_follow'
register: file10_target_stat
- name: assert that the link target was unmodified
assert:
that:
- 'file10_target_stat["stat"]["mode"] == "0644"'
# https://github.com/ansible/ansible/issues/56928
- block:
- name: Create a testing file
file:
path: "{{ remote_tmp_dir_test }}/test_follow1"
state: touch
- name: Create a symlink and change mode of the original file, since follow == yes by default
file:
src: "{{ remote_tmp_dir_test }}/test_follow1"
dest: "{{ remote_tmp_dir_test }}/test_follow1_link"
state: link
mode: 0700
- name: stat the original file
stat:
path: "{{ remote_tmp_dir_test }}/test_follow1"
register: stat_out
- name: Check if the mode of the original file was set
assert:
that:
- 'stat_out.stat.mode == "0700"'
always:
- name: Clean up
file:
path: "{{ item }}"
state: absent
loop:
- "{{ remote_tmp_dir_test }}/test_follow1"
- "{{ remote_tmp_dir_test }}/test_follow1_link"
# END #56928
# Test failure with src and no state parameter
- name: Specify src without state
file:
src: "{{ output_file }}"
dest: "{{ remote_tmp_dir_test }}/link.txt"
ignore_errors: yes
register: src_state
- name: Ensure src without state failed
assert:
that:
- src_state is failed
- "'src option requires state to be' in src_state.msg"
# Test creating a symlink when the destination exists and is a file
- name: create a test file
copy:
dest: '{{ remote_tmp_dir_test }}/file.txt'
content: 'this is a test file\n'
mode: 0666
- name: Create a symlink with dest already a file
file:
src: '{{ output_file }}'
dest: '{{ remote_tmp_dir_test }}/file.txt'
state: link
ignore_errors: true
register: dest_is_existing_file_fail
- name: Stat to make sure the symlink was not created
stat:
path: '{{ remote_tmp_dir_test }}/file.txt'
follow: false
register: dest_is_existing_file_fail_stat
- name: Forcefully create a symlink with dest already a file
file:
src: '{{ output_file }}'
dest: '{{ remote_tmp_dir_test }}/file.txt'
state: link
force: true
register: dest_is_existing_file_force
- name: Stat to make sure the symlink was created
stat:
path: '{{ remote_tmp_dir_test }}/file.txt'
follow: false
register: dest_is_existing_file_force_stat
- assert:
that:
- dest_is_existing_file_fail is failed
- not dest_is_existing_file_fail_stat.stat.islnk
- dest_is_existing_file_force is changed
- dest_is_existing_file_force_stat.stat.exists
- dest_is_existing_file_force_stat.stat.islnk
|
test/sanity/ignore.txt
|
.azure-pipelines/scripts/publish-codecov.py replace-urlopen
docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst no-smart-quotes
docs/docsite/rst/locales/ja/LC_MESSAGES/dev_guide.po no-smart-quotes # Translation of the no-smart-quotes rule
examples/scripts/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
examples/scripts/upgrade_to_ps3.ps1 pslint:PSCustomUseLiteralPath
examples/scripts/upgrade_to_ps3.ps1 pslint:PSUseApprovedVerbs
lib/ansible/cli/scripts/ansible_connection_cli_stub.py shebang
lib/ansible/config/base.yml no-unwanted-files
lib/ansible/executor/playbook_executor.py pylint:disallowed-name
lib/ansible/executor/powershell/async_watchdog.ps1 pslint:PSCustomUseLiteralPath
lib/ansible/executor/powershell/async_wrapper.ps1 pslint:PSCustomUseLiteralPath
lib/ansible/executor/powershell/exec_wrapper.ps1 pslint:PSCustomUseLiteralPath
lib/ansible/executor/task_queue_manager.py pylint:disallowed-name
lib/ansible/keyword_desc.yml no-unwanted-files
lib/ansible/modules/apt.py validate-modules:parameter-invalid
lib/ansible/modules/apt_repository.py validate-modules:parameter-invalid
lib/ansible/modules/assemble.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/async_status.py use-argspec-type-path
lib/ansible/modules/async_status.py validate-modules!skip
lib/ansible/modules/async_wrapper.py ansible-doc!skip # not an actual module
lib/ansible/modules/async_wrapper.py pylint:ansible-bad-function # ignore, required
lib/ansible/modules/async_wrapper.py use-argspec-type-path
lib/ansible/modules/blockinfile.py validate-modules:doc-choices-do-not-match-spec
lib/ansible/modules/blockinfile.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/command.py validate-modules:doc-default-does-not-match-spec # _uses_shell is undocumented
lib/ansible/modules/command.py validate-modules:doc-missing-type
lib/ansible/modules/command.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/command.py validate-modules:undocumented-parameter
lib/ansible/modules/copy.py pylint:disallowed-name
lib/ansible/modules/copy.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/copy.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/copy.py validate-modules:undocumented-parameter
lib/ansible/modules/dnf.py validate-modules:doc-required-mismatch
lib/ansible/modules/dnf.py validate-modules:parameter-invalid
lib/ansible/modules/file.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/file.py validate-modules:undocumented-parameter
lib/ansible/modules/find.py use-argspec-type-path # fix needed
lib/ansible/modules/git.py pylint:disallowed-name
lib/ansible/modules/git.py use-argspec-type-path
lib/ansible/modules/git.py validate-modules:doc-missing-type
lib/ansible/modules/git.py validate-modules:doc-required-mismatch
lib/ansible/modules/iptables.py pylint:disallowed-name
lib/ansible/modules/lineinfile.py validate-modules:doc-choices-do-not-match-spec
lib/ansible/modules/lineinfile.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/lineinfile.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/package_facts.py validate-modules:doc-choices-do-not-match-spec
lib/ansible/modules/pip.py pylint:disallowed-name
lib/ansible/modules/replace.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/service.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/service.py validate-modules:use-run-command-not-popen
lib/ansible/modules/stat.py validate-modules:doc-default-does-not-match-spec # get_md5 is undocumented
lib/ansible/modules/stat.py validate-modules:parameter-invalid
lib/ansible/modules/stat.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/stat.py validate-modules:undocumented-parameter
lib/ansible/modules/systemd_service.py validate-modules:parameter-invalid
lib/ansible/modules/systemd_service.py validate-modules:return-syntax-error
lib/ansible/modules/sysvinit.py validate-modules:return-syntax-error
lib/ansible/modules/uri.py validate-modules:doc-required-mismatch
lib/ansible/modules/user.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/user.py validate-modules:use-run-command-not-popen
lib/ansible/modules/yum.py pylint:disallowed-name
lib/ansible/modules/yum.py validate-modules:parameter-invalid
lib/ansible/modules/yum_repository.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/yum_repository.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/yum_repository.py validate-modules:undocumented-parameter
lib/ansible/module_utils/compat/_selectors2.py future-import-boilerplate # ignore bundled
lib/ansible/module_utils/compat/_selectors2.py metaclass-boilerplate # ignore bundled
lib/ansible/module_utils/compat/_selectors2.py pylint:disallowed-name
lib/ansible/module_utils/compat/selinux.py import-2.7!skip # pass/fail depends on presence of libselinux.so
lib/ansible/module_utils/compat/selinux.py import-3.5!skip # pass/fail depends on presence of libselinux.so
lib/ansible/module_utils/compat/selinux.py import-3.6!skip # pass/fail depends on presence of libselinux.so
lib/ansible/module_utils/compat/selinux.py import-3.7!skip # pass/fail depends on presence of libselinux.so
lib/ansible/module_utils/compat/selinux.py import-3.8!skip # pass/fail depends on presence of libselinux.so
lib/ansible/module_utils/compat/selinux.py import-3.9!skip # pass/fail depends on presence of libselinux.so
lib/ansible/module_utils/distro/_distro.py future-import-boilerplate # ignore bundled
lib/ansible/module_utils/distro/_distro.py metaclass-boilerplate # ignore bundled
lib/ansible/module_utils/distro/_distro.py no-assert
lib/ansible/module_utils/distro/_distro.py pylint:using-constant-test # bundled code we don't want to modify
lib/ansible/module_utils/distro/_distro.py pep8!skip # bundled code we don't want to modify
lib/ansible/module_utils/distro/__init__.py empty-init # breaks namespacing, bundled, do not override
lib/ansible/module_utils/facts/__init__.py empty-init # breaks namespacing, deprecate and eventually remove
lib/ansible/module_utils/facts/network/linux.py pylint:disallowed-name
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 pslint:PSUseApprovedVerbs
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 pslint:PSProvideCommentHelp # need to agree on best format for comment location
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 pslint:PSUseApprovedVerbs
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 pslint:PSCustomUseLiteralPath
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 pslint:PSProvideCommentHelp
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 pslint:PSCustomUseLiteralPath
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 pslint:PSUseApprovedVerbs
lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 pslint:PSUseApprovedVerbs
lib/ansible/module_utils/pycompat24.py no-get-exception
lib/ansible/module_utils/six/__init__.py empty-init # breaks namespacing, bundled, do not override
lib/ansible/module_utils/six/__init__.py future-import-boilerplate # ignore bundled
lib/ansible/module_utils/six/__init__.py metaclass-boilerplate # ignore bundled
lib/ansible/module_utils/six/__init__.py no-basestring
lib/ansible/module_utils/six/__init__.py no-dict-iteritems
lib/ansible/module_utils/six/__init__.py no-dict-iterkeys
lib/ansible/module_utils/six/__init__.py no-dict-itervalues
lib/ansible/module_utils/six/__init__.py pylint:self-assigning-variable
lib/ansible/module_utils/six/__init__.py replace-urlopen
lib/ansible/module_utils/urls.py pylint:arguments-renamed
lib/ansible/module_utils/urls.py pylint:disallowed-name
lib/ansible/module_utils/urls.py replace-urlopen
lib/ansible/parsing/vault/__init__.py pylint:disallowed-name
lib/ansible/parsing/yaml/objects.py pylint:arguments-renamed
lib/ansible/playbook/base.py pylint:disallowed-name
lib/ansible/playbook/collectionsearch.py required-and-default-attributes # https://github.com/ansible/ansible/issues/61460
lib/ansible/playbook/helpers.py pylint:disallowed-name
lib/ansible/plugins/action/normal.py action-plugin-docs # default action plugin for modules without a dedicated action plugin
lib/ansible/plugins/cache/base.py ansible-doc!skip # not a plugin, but a stub for backwards compatibility
lib/ansible/plugins/callback/__init__.py pylint:arguments-renamed
lib/ansible/plugins/inventory/advanced_host_list.py pylint:arguments-renamed
lib/ansible/plugins/inventory/host_list.py pylint:arguments-renamed
lib/ansible/plugins/lookup/random_choice.py pylint:arguments-renamed
lib/ansible/plugins/lookup/sequence.py pylint:disallowed-name
lib/ansible/plugins/shell/cmd.py pylint:arguments-renamed
lib/ansible/plugins/strategy/__init__.py pylint:disallowed-name
lib/ansible/plugins/strategy/linear.py pylint:disallowed-name
lib/ansible/utils/collection_loader/_collection_finder.py pylint:deprecated-class
lib/ansible/utils/collection_loader/_collection_meta.py pylint:deprecated-class
lib/ansible/vars/hostvars.py pylint:disallowed-name
test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level
test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-function # ignore, required for testing
test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import-from # ignore, required for testing
test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import # ignore, required for testing
test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level
test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level
test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level
test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level
test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level
test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/vendored_pty.py pep8!skip # vendored code
test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py pylint:relative-beyond-top-level
test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py pylint:relative-beyond-top-level
test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py pylint:relative-beyond-top-level
test/integration/targets/gathering_facts/library/bogus_facts shebang
test/integration/targets/gathering_facts/library/facts_one shebang
test/integration/targets/gathering_facts/library/facts_two shebang
test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 pslint!skip
test/integration/targets/json_cleanup/library/bad_json shebang
test/integration/targets/lookup_csvfile/files/crlf.csv line-endings
test/integration/targets/lookup_ini/lookup-8859-15.ini no-smart-quotes
test/integration/targets/module_precedence/lib_with_extension/a.ini shebang
test/integration/targets/module_precedence/lib_with_extension/ping.ini shebang
test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini shebang
test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini shebang
test/integration/targets/module_utils/library/test.py future-import-boilerplate # allow testing of Python 2.x implicit relative imports
test/integration/targets/module_utils/module_utils/bar0/foo.py pylint:disallowed-name
test/integration/targets/module_utils/module_utils/foo.py pylint:disallowed-name
test/integration/targets/module_utils/module_utils/sub/bar/bar.py pylint:disallowed-name
test/integration/targets/module_utils/module_utils/sub/bar/__init__.py pylint:disallowed-name
test/integration/targets/module_utils/module_utils/yak/zebra/foo.py pylint:disallowed-name
test/integration/targets/old_style_modules_posix/library/helloworld.sh shebang
test/integration/targets/template/files/encoding_1252_utf-8.expected no-smart-quotes
test/integration/targets/template/files/encoding_1252_windows-1252.expected no-smart-quotes
test/integration/targets/template/files/foo.dos.txt line-endings
test/integration/targets/template/templates/encoding_1252.j2 no-smart-quotes
test/integration/targets/unicode/unicode.yml no-smart-quotes
test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1 pslint!skip
test/integration/targets/win_exec_wrapper/library/test_fail.ps1 pslint:PSCustomUseLiteralPath
test/integration/targets/win_exec_wrapper/tasks/main.yml no-smart-quotes # We are explicitly testing smart quote support for env vars
test/integration/targets/win_fetch/tasks/main.yml no-smart-quotes # We are explicitly testing smart quotes in the file name to fetch
test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 line-endings # Explicitly tests that we still work with Windows line endings
test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 line-endings # Explicitly tests that we still work with Windows line endings
test/integration/targets/win_script/files/test_script.ps1 pslint:PSAvoidUsingWriteHost # Keep
test/integration/targets/win_script/files/test_script_removes_file.ps1 pslint:PSCustomUseLiteralPath
test/integration/targets/win_script/files/test_script_with_args.ps1 pslint:PSAvoidUsingWriteHost # Keep
test/integration/targets/win_script/files/test_script_with_splatting.ps1 pslint:PSAvoidUsingWriteHost # Keep
test/lib/ansible_test/_data/requirements/sanity.pslint.ps1 pslint:PSCustomUseLiteralPath # Uses wildcards on purpose
test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
test/lib/ansible_test/_util/target/setup/requirements.py replace-urlopen
test/support/integration/plugins/modules/timezone.py pylint:disallowed-name
test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py metaclass-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py pylint:use-a-generator
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py pylint:consider-using-dict-comprehension
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py no-unicode-literals
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py pep8:E203
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py pylint:unnecessary-comprehension
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py pylint:use-a-generator
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py pylint:unnecessary-comprehension
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py pylint:arguments-renamed
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py pep8:E501
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py pylint:arguments-renamed
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pep8:E231
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pylint:disallowed-name
test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/module_utils/WebRequest.psm1 pslint!skip
test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_uri.ps1 pslint!skip
test/support/windows-integration/plugins/modules/async_status.ps1 pslint!skip
test/support/windows-integration/plugins/modules/setup.ps1 pslint!skip
test/support/windows-integration/plugins/modules/slurp.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_acl.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_certificate_store.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_command.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_copy.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_file.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_get_url.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_lineinfile.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_regedit.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_shell.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_stat.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_tempfile.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_user_right.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_user.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_wait_for.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_whoami.ps1 pslint!skip
test/units/executor/test_play_iterator.py pylint:disallowed-name
test/units/modules/test_apt.py pylint:disallowed-name
test/units/module_utils/basic/test_deprecate_warn.py pylint:ansible-deprecated-no-version
test/units/module_utils/basic/test_deprecate_warn.py pylint:ansible-deprecated-version
test/units/module_utils/basic/test_run_command.py pylint:disallowed-name
test/units/module_utils/urls/fixtures/multipart.txt line-endings # Fixture for HTTP tests that use CRLF
test/units/module_utils/urls/test_fetch_url.py replace-urlopen
test/units/module_utils/urls/test_gzip.py replace-urlopen
test/units/module_utils/urls/test_Request.py replace-urlopen
test/units/parsing/vault/test_vault.py pylint:disallowed-name
test/units/playbook/role/test_role.py pylint:disallowed-name
test/units/plugins/test_plugins.py pylint:disallowed-name
test/units/template/test_templar.py pylint:disallowed-name
test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py pylint:relative-beyond-top-level
test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py empty-init # testing that collections don't need inits
test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py empty-init # testing that collections don't need inits
test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py empty-init # testing that collections don't need inits
test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py empty-init # testing that collections don't need inits
test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py empty-init # testing that collections don't need inits
test/units/utils/collection_loader/test_collection_loader.py pylint:undefined-variable # magic runtime local var splatting
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
### Summary
Hello,
We are developing a new custom collection that has the following deep module structure:
```
plugins/modules/
├── backup
│ ├── backint
│ │ ├── __init__.py
│ │ └── install.py
│ ├── configurator.py
│ ├── executor.py
│ ├── __init__.py
│ └── scheduler.py
├── certificates
│ ├── info.py
│ └── __init__.py
├── hdbuserstore
│ ├── editor.py
│ ├── info.py
│ └── __init__.py
├── __init__.py
├── parameters
│ ├── editor.py
│ ├── __init__.py
│ └── validator.py
├── procedure.py
├── pse
│ ├── certificates_info.py
│ └── __init__.py
└── query.py
```
Now, if we run the command `ansible-test sanity --test ansible-doc`, it fails with this error:
```
Running sanity test "ansible-doc"
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
The issue seems to be a bug in the way the class `ansible_test._internal.commands.sanity.ansible_doc.AnsibleDocTest` computes the module FQCN inside its `test` method. Specifically, the following `for` loop drops the intermediate parts of the module FQCN when appending values to the `doc_targets` and `target_paths` dicts:
```python
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
```
This could be fixed with something like the following excerpt, which takes the whole module FQCN into account:
```python
module_path = os.path.join(data_context().content.module_path, '')
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_path_noext = os.path.splitext(plugin_file_path)[0]
plugin_name_parts = plugin_path_noext.replace(module_path, '', 1).split('/')
plugin_ctx, plugin_name = plugin_name_parts[:-1], plugin_name_parts[-1]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
if plugin_ctx:
plugin_full_ctx = data_context().content.prefix + '.'.join(plugin_ctx) + '.'
else:
plugin_full_ctx = data_context().content.prefix
plugin_full_name = plugin_full_ctx + plugin_name
doc_targets[plugin_type].append(plugin_full_name)
target_paths[plugin_type][plugin_full_name] = plugin_file_path
```
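For a quick, self-contained comparison of the two computations (the `prefix` and paths below are stand-ins for ansible-test's `data_context()` values, assumed purely for illustration):
```python
import os

# Assumed stand-in values, for illustration only.
prefix = 'priejos.saphana.'
module_path = 'plugins/modules/'
plugin_file_path = 'plugins/modules/pse/certificates_info.py'

# Current computation: only the basename survives, dropping the 'pse' segment.
old_name = prefix + os.path.splitext(os.path.basename(plugin_file_path))[0]
print(old_name)  # -> priejos.saphana.certificates_info (wrong FQCN)

# Proposed computation: keep the intermediate directories as FQCN segments.
parts = os.path.splitext(plugin_file_path)[0].replace(module_path, '', 1).split('/')
new_name = prefix + '.'.join(parts)
print(new_name)  # -> priejos.saphana.pse.certificates_info (correct FQCN)
```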
Thanks in advance for your support.
Cheers
Jose M. Prieto
### Issue Type
Bug Report
### Component Name
ansible-test sanity --test ansible-doc
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/users/priejos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/lib64/python3.8/site-packages/ansible
ansible collection location = /usr/sap/CEXIE/team/priejos
executable location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/bin/ansible
python version = 3.8.11 (default, Jul 23 2021, 14:55:16) [GCC 9.1.1 20190605 (Red Hat 9.1.1-2)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
COLLECTIONS_PATHS(env: ANSIBLE_COLLECTIONS_PATH) = ['/usr/sap/CEXIE/team/priejos']
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
CONNECTION:
==========
paramiko_ssh:
____________
host_key_checking(/etc/ansible/ansible.cfg) = False
ssh:
___
host_key_checking(/etc/ansible/ansible.cfg) = False
pipelining(/etc/ansible/ansible.cfg) = True
scp_if_ssh(/etc/ansible/ansible.cfg) = True
ssh_args(/etc/ansible/ansible.cfg) = -o ConnectionAttempts=20
```
### OS / Environment
$ lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: RedHatEnterpriseServer
Description: Red Hat Enterprise Linux Server release 7.9 (Maipo)
Release: 7.9
Codename: Maipo
### Steps to Reproduce
1. Create a deep module structure like the one pasted in the Summary section
2. Execute the ansible-doc sanity check like `ansible-test sanity --test ansible-doc`
### Expected Results
The sanity check `ansible-doc` should recognize the right module FQCN in a deep structure like the one shown in the Summary section.
### Actual Results
```console
$ ansible-test sanity --test ansible-doc -v --debug
Creating container database.
Read 5 sanity test ignore line(s) for Ansible 2.13 from: tests/sanity/ignore-2.13.txt
Running sanity test "ansible-doc"
Initializing "/tmp/ansible-test-7fr64tiv-injector" as the temporary injector directory.
Run command: ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
changelogs/fragments/ansible-test-ansible-doc-sanity-fqcn.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
### Summary
Hello,
We are developing a new custom collection that has the following deep module structure:
```
plugins/modules/
├── backup
│ ├── backint
│ │ ├── __init__.py
│ │ └── install.py
│ ├── configurator.py
│ ├── executor.py
│ ├── __init__.py
│ └── scheduler.py
├── certificates
│ ├── info.py
│ └── __init__.py
├── hdbuserstore
│ ├── editor.py
│ ├── info.py
│ └── __init__.py
├── __init__.py
├── parameters
│ ├── editor.py
│ ├── __init__.py
│ └── validator.py
├── procedure.py
├── pse
│ ├── certificates_info.py
│ └── __init__.py
└── query.py
```
Now, if we run the command `ansible-test sanity --test ansible-doc`, it fails with this error:
```
Running sanity test "ansible-doc"
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
The issue seems to be a bug in the way the class `ansible_test._internal.commands.sanity.ansible_doc.AnsibleDocTest` computes the module FQCN inside its `test` method. Specifically, the following `for` loop drops the intermediate parts of the module FQCN when appending values to the `doc_targets` and `target_paths` dicts:
```python
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
```
This could be fixed with something like the following excerpt, which takes the whole module FQCN into account:
```python
module_path = os.path.join(data_context().content.module_path, '')
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_path_noext = os.path.splitext(plugin_file_path)[0]
plugin_name_parts = plugin_path_noext.replace(module_path, '', 1).split('/')
plugin_ctx, plugin_name = plugin_name_parts[:-1], plugin_name_parts[-1]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
if plugin_ctx:
plugin_full_ctx = data_context().content.prefix + '.'.join(plugin_ctx) + '.'
else:
plugin_full_ctx = data_context().content.prefix
plugin_full_name = plugin_full_ctx + plugin_name
doc_targets[plugin_type].append(plugin_full_name)
target_paths[plugin_type][plugin_full_name] = plugin_file_path
```
Thanks in advance for your support.
Cheers
Jose M. Prieto
### Issue Type
Bug Report
### Component Name
ansible-test sanity --test ansible-doc
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/users/priejos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/lib64/python3.8/site-packages/ansible
ansible collection location = /usr/sap/CEXIE/team/priejos
executable location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/bin/ansible
python version = 3.8.11 (default, Jul 23 2021, 14:55:16) [GCC 9.1.1 20190605 (Red Hat 9.1.1-2)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
COLLECTIONS_PATHS(env: ANSIBLE_COLLECTIONS_PATH) = ['/usr/sap/CEXIE/team/priejos']
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
CONNECTION:
==========
paramiko_ssh:
____________
host_key_checking(/etc/ansible/ansible.cfg) = False
ssh:
___
host_key_checking(/etc/ansible/ansible.cfg) = False
pipelining(/etc/ansible/ansible.cfg) = True
scp_if_ssh(/etc/ansible/ansible.cfg) = True
ssh_args(/etc/ansible/ansible.cfg) = -o ConnectionAttempts=20
```
### OS / Environment
$ lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: RedHatEnterpriseServer
Description: Red Hat Enterprise Linux Server release 7.9 (Maipo)
Release: 7.9
Codename: Maipo
### Steps to Reproduce
1. Create a deep module structure like the one pasted in the Summary section
2. Execute the ansible-doc sanity check like `ansible-test sanity --test ansible-doc`
### Expected Results
The sanity check `ansible-doc` should recognize the right module FQCN in a deep structure like the one shown in the Summary section.
### Actual Results
```console
$ ansible-test sanity --test ansible-doc -v --debug
Creating container database.
Read 5 sanity test ignore line(s) for Ansible 2.13 from: tests/sanity/ignore-2.13.txt
Running sanity test "ansible-doc"
Initializing "/tmp/ansible-test-7fr64tiv-injector" as the temporary injector directory.
Run command: ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/integration/targets/ansible-test-sanity-ansible-doc/aliases
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
### Summary
Hello,
We are developing a new custom collection that has the following deep module structure:
```
plugins/modules/
├── backup
│ ├── backint
│ │ ├── __init__.py
│ │ └── install.py
│ ├── configurator.py
│ ├── executor.py
│ ├── __init__.py
│ └── scheduler.py
├── certificates
│ ├── info.py
│ └── __init__.py
├── hdbuserstore
│ ├── editor.py
│ ├── info.py
│ └── __init__.py
├── __init__.py
├── parameters
│ ├── editor.py
│ ├── __init__.py
│ └── validator.py
├── procedure.py
├── pse
│ ├── certificates_info.py
│ └── __init__.py
└── query.py
```
Now, if we run the command `ansible-test sanity --test ansible-doc`, it fails with this error:
```
Running sanity test "ansible-doc"
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
The issue seems to be a bug in the way the class `ansible_test._internal.commands.sanity.ansible_doc.AnsibleDocTest` computes the module FQCN inside its `test` method. Specifically, the following `for` loop drops the intermediate parts of the module FQCN when appending values to the `doc_targets` and `target_paths` dicts:
```python
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
```
This could be fixed with something like the following excerpt, which takes the whole module FQCN into account:
```python
module_path = os.path.join(data_context().content.module_path, '')
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_path_noext = os.path.splitext(plugin_file_path)[0]
plugin_name_parts = plugin_path_noext.replace(module_path, '', 1).split('/')
plugin_ctx, plugin_name = plugin_name_parts[:-1], plugin_name_parts[-1]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
if plugin_ctx:
plugin_full_ctx = data_context().content.prefix + '.'.join(plugin_ctx) + '.'
else:
plugin_full_ctx = data_context().content.prefix
plugin_full_name = plugin_full_ctx + plugin_name
doc_targets[plugin_type].append(plugin_full_name)
target_paths[plugin_type][plugin_full_name] = plugin_file_path
```
Thanks in advance for your support.
Cheers
Jose M. Prieto
### Issue Type
Bug Report
### Component Name
ansible-test sanity --test ansible-doc
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/users/priejos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/lib64/python3.8/site-packages/ansible
ansible collection location = /usr/sap/CEXIE/team/priejos
executable location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/bin/ansible
python version = 3.8.11 (default, Jul 23 2021, 14:55:16) [GCC 9.1.1 20190605 (Red Hat 9.1.1-2)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
COLLECTIONS_PATHS(env: ANSIBLE_COLLECTIONS_PATH) = ['/usr/sap/CEXIE/team/priejos']
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
CONNECTION:
==========
paramiko_ssh:
____________
host_key_checking(/etc/ansible/ansible.cfg) = False
ssh:
___
host_key_checking(/etc/ansible/ansible.cfg) = False
pipelining(/etc/ansible/ansible.cfg) = True
scp_if_ssh(/etc/ansible/ansible.cfg) = True
ssh_args(/etc/ansible/ansible.cfg) = -o ConnectionAttempts=20
```
### OS / Environment
$ lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: RedHatEnterpriseServer
Description: Red Hat Enterprise Linux Server release 7.9 (Maipo)
Release: 7.9
Codename: Maipo
### Steps to Reproduce
1. Create a deep module structure like the one pasted in the Summary section
2. Execute the ansible-doc sanity check like `ansible-test sanity --test ansible-doc`
### Expected Results
The sanity check `ansible-doc` should recognize the right module FQCN in a deep structure like the one shown in the Summary section.
### Actual Results
```console
$ ansible-test sanity --test ansible-doc -v --debug
Creating container database.
Read 5 sanity test ignore line(s) for Ansible 2.13 from: tests/sanity/ignore-2.13.txt
Running sanity test "ansible-doc"
Initializing "/tmp/ansible-test-7fr64tiv-injector" as the temporary injector directory.
Run command: ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/integration/targets/ansible-test-sanity-ansible-doc/ansible_collections/ns/col/plugins/lookup/a/b/lookup2.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
### Summary
Hello,
We are developing a new custom collection that has the following deep module structure:
```
plugins/modules/
├── backup
│ ├── backint
│ │ ├── __init__.py
│ │ └── install.py
│ ├── configurator.py
│ ├── executor.py
│ ├── __init__.py
│ └── scheduler.py
├── certificates
│ ├── info.py
│ └── __init__.py
├── hdbuserstore
│ ├── editor.py
│ ├── info.py
│ └── __init__.py
├── __init__.py
├── parameters
│ ├── editor.py
│ ├── __init__.py
│ └── validator.py
├── procedure.py
├── pse
│ ├── certificates_info.py
│ └── __init__.py
└── query.py
```
Now, if we run the command `ansible-test sanity --test ansible-doc`, it fails with this error:
```
Running sanity test "ansible-doc"
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
The issue seems to be a bug in the way the class `ansible_test._internal.commands.sanity.ansible_doc.AnsibleDocTest` computes the module FQCN inside its `test` method. Specifically, the following `for` loop drops the intermediate parts of the module FQCN when appending values to the `doc_targets` and `target_paths` dicts:
```python
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
```
This could be fixed with something like the following excerpt, which takes the whole module FQCN into account:
```python
module_path = os.path.join(data_context().content.module_path, '')
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
plugin_type = remap_types.get(plugin_type, plugin_type)
for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
plugin_path_noext = os.path.splitext(plugin_file_path)[0]
plugin_name_parts = plugin_path_noext.replace(module_path, '', 1).split('/')
plugin_ctx, plugin_name = plugin_name_parts[:-1], plugin_name_parts[-1]
if plugin_name.startswith('_'):
plugin_name = plugin_name[1:]
if plugin_ctx:
plugin_full_ctx = data_context().content.prefix + '.'.join(plugin_ctx) + '.'
else:
plugin_full_ctx = data_context().content.prefix
plugin_full_name = plugin_full_ctx + plugin_name
doc_targets[plugin_type].append(plugin_full_name)
target_paths[plugin_type][plugin_full_name] = plugin_file_path
```
Thanks in advance for your support.
Cheers
Jose M. Prieto
### Issue Type
Bug Report
### Component Name
ansible-test sanity --test ansible-doc
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/users/priejos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/lib64/python3.8/site-packages/ansible
ansible collection location = /usr/sap/CEXIE/team/priejos
executable location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/bin/ansible
python version = 3.8.11 (default, Jul 23 2021, 14:55:16) [GCC 9.1.1 20190605 (Red Hat 9.1.1-2)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
COLLECTIONS_PATHS(env: ANSIBLE_COLLECTIONS_PATH) = ['/usr/sap/CEXIE/team/priejos']
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
CONNECTION:
==========
paramiko_ssh:
____________
host_key_checking(/etc/ansible/ansible.cfg) = False
ssh:
___
host_key_checking(/etc/ansible/ansible.cfg) = False
pipelining(/etc/ansible/ansible.cfg) = True
scp_if_ssh(/etc/ansible/ansible.cfg) = True
ssh_args(/etc/ansible/ansible.cfg) = -o ConnectionAttempts=20
```
### OS / Environment
$ lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: RedHatEnterpriseServer
Description: Red Hat Enterprise Linux Server release 7.9 (Maipo)
Release: 7.9
Codename: Maipo
### Steps to Reproduce
1. Create a deep module structure like the one pasted in the Summary section
2. Execute the ansible-doc sanity check like `ansible-test sanity --test ansible-doc`
### Expected Results
The sanity check `ansible-doc` should recognize the right module FQCN in a deep structure like the one shown in the Summary section.
### Actual Results
```console
$ ansible-test sanity --test ansible-doc -v --debug
Creating container database.
Read 5 sanity test ignore line(s) for Ansible 2.13 from: tests/sanity/ignore-2.13.txt
Running sanity test "ansible-doc"
Initializing "/tmp/ansible-test-7fr64tiv-injector" as the temporary injector directory.
Run command: ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/integration/targets/ansible-test-sanity-ansible-doc/ansible_collections/ns/col/plugins/lookup/lookup1.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
### Summary
Hello,
We are developing a new custom collection that has the following deep module structure:
```
plugins/modules/
├── backup
│   ├── backint
│   │   ├── __init__.py
│   │   └── install.py
│   ├── configurator.py
│   ├── executor.py
│   ├── __init__.py
│   └── scheduler.py
├── certificates
│   ├── info.py
│   └── __init__.py
├── hdbuserstore
│   ├── editor.py
│   ├── info.py
│   └── __init__.py
├── __init__.py
├── parameters
│   ├── editor.py
│   ├── __init__.py
│   └── validator.py
├── procedure.py
├── pse
│   ├── certificates_info.py
│   └── __init__.py
└── query.py
```
Now, if we run the command `ansible-test sanity --test ansible-doc`, it fails with this error:
```
Running sanity test "ansible-doc"
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-2z8ljk2u/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
The issue seems to be a bug in the way the class `ansible_test._internal.commands.sanity.ansible_doc.AnsibleDocTest` computes the module FQCN inside its `test` method. Specifically, the following `for` loop in that method fails to include the rest of the module FQCN when appending values to the `doc_targets` and `target_paths` dicts:
```python
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
    plugin_type = remap_types.get(plugin_type, plugin_type)
    for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
        plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
        if plugin_name.startswith('_'):
            plugin_name = plugin_name[1:]
        doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
        target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
```
This could be fixed with something like the following code excerpt, which takes the whole module FQCN into consideration:
```python
module_path = os.path.join(data_context().content.module_path, '')
for plugin_type, plugin_path in data_context().content.plugin_paths.items():
    plugin_type = remap_types.get(plugin_type, plugin_type)
    for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
        plugin_path_noext = os.path.splitext(plugin_file_path)[0]
        plugin_name_parts = plugin_path_noext.replace(module_path, '', 1).split('/')
        plugin_ctx, plugin_name = plugin_name_parts[:-1], plugin_name_parts[-1]
        if plugin_name.startswith('_'):
            plugin_name = plugin_name[1:]
        if plugin_ctx:
            plugin_full_ctx = data_context().content.prefix + '.'.join(plugin_ctx) + '.'
        else:
            plugin_full_ctx = data_context().content.prefix
        plugin_full_name = plugin_full_ctx + plugin_name
        doc_targets[plugin_type].append(plugin_full_name)
        target_paths[plugin_type][plugin_full_name] = plugin_file_path
```
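To make the difference concrete, here is a minimal, self-contained sketch of the two computations above, using a hard-coded prefix and path from this report in place of the real `data_context()` (the values are illustrative only):
```python
# Minimal sketch (not ansible-test itself): contrasts the two name
# computations above for one module from the tree in this report.
import os

prefix = 'priejos.saphana.'                      # data_context().content.prefix
module_path = 'plugins/modules/'                 # content.module_path joined with ''
plugin_file_path = 'plugins/modules/backup/backint/install.py'

# Original computation: basename() discards the subdirectories.
old_name = prefix + os.path.splitext(os.path.basename(plugin_file_path))[0]

# Proposed computation: keep every path component below plugins/modules/.
parts = os.path.splitext(plugin_file_path)[0].replace(module_path, '', 1).split('/')
new_name = prefix + '.'.join(parts)

print(old_name)  # priejos.saphana.install (the name ansible-doc cannot find)
print(new_name)  # priejos.saphana.backup.backint.install (the full FQCN)
```
With the original computation the subdirectories are discarded, which is exactly why ansible-doc is asked for `priejos.saphana.install` instead of `priejos.saphana.backup.backint.install`.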
Thanks in advance for your support.
Cheers,
Jose M. Prieto
### Issue Type
Bug Report
### Component Name
ansible-test sanity --test ansible-doc
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/users/priejos/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/lib64/python3.8/site-packages/ansible
ansible collection location = /usr/sap/CEXIE/team/priejos
executable location = /usr/sap/CEXIE/team/priejos/ansible_collections/roche/saphana/venv/bin/ansible
python version = 3.8.11 (default, Jul 23 2021, 14:55:16) [GCC 9.1.1 20190605 (Red Hat 9.1.1-2)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
COLLECTIONS_PATHS(env: ANSIBLE_COLLECTIONS_PATH) = ['/usr/sap/CEXIE/team/priejos']
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
CONNECTION:
==========
paramiko_ssh:
____________
host_key_checking(/etc/ansible/ansible.cfg) = False
ssh:
___
host_key_checking(/etc/ansible/ansible.cfg) = False
pipelining(/etc/ansible/ansible.cfg) = True
scp_if_ssh(/etc/ansible/ansible.cfg) = True
ssh_args(/etc/ansible/ansible.cfg) = -o ConnectionAttempts=20
```
### OS / Environment
$ lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: RedHatEnterpriseServer
Description: Red Hat Enterprise Linux Server release 7.9 (Maipo)
Release: 7.9
Codename: Maipo
### Steps to Reproduce
1. Set up a deep module structure like the one pasted in the Summary section
2. Execute the ansible-doc sanity check like `ansible-test sanity --test ansible-doc`
### Expected Results
The sanity check `ansible-doc` should recognize the right module FQCN in a deep structure like the one shown in the "Summary" section.
### Actual Results
```console
$ ansible-test sanity --test ansible-doc -v --debug
Creating container database.
Read 5 sanity test ignore line(s) for Ansible 2.13 from: tests/sanity/ignore-2.13.txt
Running sanity test "ansible-doc"
Initializing "/tmp/ansible-test-7fr64tiv-injector" as the temporary injector directory.
Run command: ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator
ERROR: Output on stderr from ansible-doc is considered an error.
Command "ansible-doc -t module priejos.saphana.certificates_info priejos.saphana.configurator priejos.saphana.editor priejos.saphana.editor priejos.saphana.executor priejos.saphana.info priejos.saphana.info priejos.saphana.install priejos.saphana.procedure priejos.saphana.query priejos.saphana.scheduler priejos.saphana.validator" returned exit status 0.
>>> Standard Error
[WARNING]: module priejos.saphana.certificates_info not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.configurator not found in:
/dev/null:/tmp/ansible-test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.editor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.executor not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.info not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.install not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.scheduler not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
[WARNING]: module priejos.saphana.validator not found in: /dev/null:/tmp/ansible-
test-q8hhje7a/ansible/modules
FATAL: The 1 sanity test(s) listed below (out of 1) failed. See error output above for details.
ansible-doc
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/integration/targets/ansible-test-sanity-ansible-doc/ansible_collections/ns/col/plugins/modules/a/b/module2.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/integration/targets/ansible-test-sanity-ansible-doc/ansible_collections/ns/col/plugins/modules/module1.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/integration/targets/ansible-test-sanity-ansible-doc/runme.sh
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,490 |
Wrong module FQCN computed by sanity test `ansible-doc` when having a deep module struct
|
|
https://github.com/ansible/ansible/issues/78490
|
https://github.com/ansible/ansible/pull/78518
|
9eb3d6811bfbdc9c776f2c549c642a9d8db3fa56
|
2b63fdd1b8dd8383a9b09b601d80623cd76ce579
| 2022-08-10T12:14:10Z |
python
| 2022-08-11T17:53:23Z |
test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py
|
"""Sanity test for ansible-doc."""
from __future__ import annotations

import collections
import os
import re

from . import (
    DOCUMENTABLE_PLUGINS,
    SanitySingleVersion,
    SanityFailure,
    SanitySuccess,
    SanityTargets,
    SanityMessage,
)

from ...test import (
    TestResult,
)

from ...target import (
    TestTarget,
)

from ...util import (
    SubprocessError,
    display,
    is_subdir,
)

from ...ansible_util import (
    ansible_environment,
    intercept_python,
)

from ...config import (
    SanityConfig,
)

from ...data import (
    data_context,
)

from ...host_configs import (
    PythonConfig,
)


class AnsibleDocTest(SanitySingleVersion):
    """Sanity test for ansible-doc."""

    def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
        """Return the given list of test targets, filtered to include only those relevant for the test."""
        plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type in DOCUMENTABLE_PLUGINS]

        return [target for target in targets
                if os.path.splitext(target.path)[1] == '.py'
                and os.path.basename(target.path) != '__init__.py'
                and any(is_subdir(target.path, path) for path in plugin_paths)
                ]

    def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
        settings = self.load_processor(args)

        paths = [target.path for target in targets.include]

        doc_targets: dict[str, list[str]] = collections.defaultdict(list)
        target_paths: dict[str, dict[str, str]] = collections.defaultdict(dict)

        remap_types = dict(
            modules='module',
        )

        for plugin_type, plugin_path in data_context().content.plugin_paths.items():
            plugin_type = remap_types.get(plugin_type, plugin_type)

            for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
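                # NOTE: basename() keeps only the final path component, so modules
                # in subdirectories lose part of their FQCN (issue #78490 above).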
                plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]

                if plugin_name.startswith('_'):
                    plugin_name = plugin_name[1:]

                doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
                target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path

        env = ansible_environment(args, color=False)
        error_messages: list[SanityMessage] = []

        for doc_type in sorted(doc_targets):
            for format_option in [None, '--json']:
                cmd = ['ansible-doc', '-t', doc_type]

                if format_option is not None:
                    cmd.append(format_option)

                cmd.extend(sorted(doc_targets[doc_type]))

                try:
                    stdout, stderr = intercept_python(args, python, cmd, env, capture=True)
                    status = 0
                except SubprocessError as ex:
                    stdout = ex.stdout
                    stderr = ex.stderr
                    status = ex.status

                if status:
                    summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
                    return SanityFailure(self.name, summary=summary)

                if stdout:
                    display.info(stdout.strip(), verbosity=3)

                if stderr:
                    # ignore removed module/plugin warnings
                    stderr = re.sub(r'\[WARNING]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()

                if stderr:
                    summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
                    return SanityFailure(self.name, summary=summary)

        if args.explain:
            return SanitySuccess(self.name)

        error_messages = settings.process_errors(error_messages, paths)

        if error_messages:
            return SanityFailure(self.name, messages=error_messages)

        return SanitySuccess(self.name)
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,438 |
"ansible-config init" command generates ansible.cfg with missing commentouts
|
### Summary
I generated ansible.cfg using the `ansible-config init` command.
The `-f vars` argument was used to specify the output format, and the output was commented out with the `--disabled` argument.
The result is a YAML-format configuration in which the array elements are not commented out.
It's probably not that important, but I think this array element needs to be commented out as well.
```yaml
#ansible_facts_modules:
- smart
```
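For reference, here is a minimal sketch of the fix idea: when `--disabled` is set, the comment prefix has to be applied to every line of the dumped YAML, not just the first one. The real logic lives in `ConfigCLI._get_settings_vars` in `lib/ansible/cli/config.py` (shown further below); this standalone version uses plain `yaml` for illustration:
```python
# Standalone illustration: prefix every line of the dumped YAML so that
# list items such as '- smart' are commented out as well.
import yaml

def commented_yaml(entry, default, prefix='#'):
    dumped = yaml.dump({entry: default}, default_flow_style=False)
    # Apply the prefix to each line, not only the first one.
    return '\n'.join(prefix + line for line in dumped.splitlines()) + '\n'

print(commented_yaml('ansible_facts_modules', ['smart']))
# #ansible_facts_modules:
# #- smart
```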
### Issue Type
Bug Report
### Component Name
ansible-config
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.2]
config file = None
configured module search path = ['/home/nnstt1/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/nnstt1/.virtualenv/ansible/lib/python3.9/site-packages/ansible
ansible collection location = /home/nnstt1/.ansible/collections:/usr/share/ansible/collections
executable location = /home/nnstt1/.virtualenv/ansible/bin/ansible
python version = 3.9.13 (main, Aug 4 2022, 15:00:24) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
N/A
```
### OS / Environment
N/A
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```bash
$ ansible-config init -f vars --disabled
```
### Expected Results
```console
# Sets the output directory and filename prefix to generate coverage run info.(str): Sets the output directory on the remote host to generate coverage reports to.
#Currently only used for remote coverage on PowerShell modules.
#This is for internal use only.
#_ansible_coverage_remote_output: ''
# No syslog on target(boolean): Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable a newer style PowerShell modules from writting to the event log.
#ansible_no_target_syslog: false
# Gather Facts Modules(list): Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type.
#If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup' or corresponding network module to the list (if you add 'smart', Ansible will also figur
#This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit).
#ansible_facts_modules:
#- smart
# Python interpreter path (or automatic discovery behavior) used for module execution(string): Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery
#ansible_python_interpreter: auto
# Adjust maximum file descriptor soft limit during Python module execution(string): Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess
#ansible_python_module_rlimit_nofile: 0
# Windows Async Startup Timeout(integer): For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how long, in seconds, to wait for the task spawned by Ansible to connec
#This is not the total time an async command can run for, but is a separate timeout to wait for an async command to start. The task will only start to be timed against its async_timeout once it has conn
#ansible_win_async_startup_timeout: 5
```
### Actual Results
```console
# Sets the output directory and filename prefix to generate coverage run info.(str): Sets the output directory on the remote host to generate coverage reports to.
#Currently only used for remote coverage on PowerShell modules.
#This is for internal use only.
#_ansible_coverage_remote_output: ''
# No syslog on target(boolean): Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable a newer style PowerShell modules from writting to the event log.
#ansible_no_target_syslog: false
# Gather Facts Modules(list): Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type.
#If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup' or corresponding network module to the list (if you add 'smart', Ansible will also figur
#This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit).
#ansible_facts_modules:
- smart
# Python interpreter path (or automatic discovery behavior) used for module execution(string): Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery
#ansible_python_interpreter: auto
# Adjust maximum file descriptor soft limit during Python module execution(string): Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess
#ansible_python_module_rlimit_nofile: 0
# Windows Async Startup Timeout(integer): For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how long, in seconds, to wait for the task spawned by Ansible to connec
#This is not the total time an async command can run for, but is a separate timeout to wait for an async command to start. The task will only start to be timed against its async_timeout once it has conn
#ansible_win_async_startup_timeout: 5
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78438
|
https://github.com/ansible/ansible/pull/78453
|
2729088f2aa23a19fa3a561d7bf94770d8278477
|
740864869ef589d453ac8cdadd6017e84c1ff6ab
| 2022-08-04T07:10:53Z |
python
| 2022-08-11T19:20:59Z |
changelogs/fragments/fix_init_commented.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,438 |
"ansible-config init" command generates ansible.cfg with missing commentouts
|
|
https://github.com/ansible/ansible/issues/78438
|
https://github.com/ansible/ansible/pull/78453
|
2729088f2aa23a19fa3a561d7bf94770d8278477
|
740864869ef589d453ac8cdadd6017e84c1ff6ab
| 2022-08-04T07:10:53Z |
python
| 2022-08-11T19:20:59Z |
lib/ansible/cli/config.py
|
#!/usr/bin/env python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import yaml
import shlex
import subprocess
from collections.abc import Mapping
from ansible import context
import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
from ansible.config.manager import ConfigManager, Setting
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.common.json import json_dump
from ansible.module_utils.six import string_types
from ansible.parsing.quoting import is_quoted
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
def yaml_dump(data, default_flow_style=True):
return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style)
def get_constants():
''' helper method to ensure we can template based on existing constants '''
if not hasattr(get_constants, 'cvars'):
get_constants.cvars = {k: getattr(C, k) for k in dir(C) if not k.startswith('__')}
return get_constants.cvars
class ConfigCLI(CLI):
""" Config command line class """
name = 'ansible-config'
def __init__(self, args, callback=None):
self.config_file = None
self.config = None
super(ConfigCLI, self).__init__(args, callback)
def init_parser(self):
super(ConfigCLI, self).init_parser(
desc="View ansible configuration.",
)
common = opt_help.argparse.ArgumentParser(add_help=False)
opt_help.add_verbosity_options(common)
common.add_argument('-c', '--config', dest='config_file',
help="path to configuration file, defaults to first file found in precedence.")
common.add_argument("-t", "--type", action="store", default='base', dest='type', choices=['all', 'base'] + list(C.CONFIGURABLE_PLUGINS),
help="Filter down to a specific plugin type.")
common.add_argument('args', help='Specific plugin to target, requires type of plugin to be set', nargs='*')
subparsers = self.parser.add_subparsers(dest='action')
subparsers.required = True
list_parser = subparsers.add_parser('list', help='Print all config options', parents=[common])
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml'], default='yaml',
help='Output format for list')
dump_parser = subparsers.add_parser('dump', help='Dump configuration', parents=[common])
dump_parser.set_defaults(func=self.execute_dump)
dump_parser.add_argument('--only-changed', '--changed-only', dest='only_changed', action='store_true',
help="Only show configurations that have changed from the default")
dump_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml', 'display'], default='display',
help='Output format for dump')
view_parser = subparsers.add_parser('view', help='View configuration file', parents=[common])
view_parser.set_defaults(func=self.execute_view)
init_parser = subparsers.add_parser('init', help='Create initial configuration', parents=[common])
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env', 'vars'], default='ini',
help='Output format for init')
init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False,
help='Prefixes all entries with a comment character to disable them')
# search_parser = subparsers.add_parser('find', help='Search configuration')
# search_parser.set_defaults(func=self.execute_search)
# search_parser.add_argument('args', help='Search term', metavar='<search term>')
def post_process_args(self, options):
options = super(ConfigCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(ConfigCLI, self).run()
if context.CLIARGS['config_file']:
self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
b_config = to_bytes(self.config_file)
if os.path.exists(b_config) and os.access(b_config, os.R_OK):
self.config = ConfigManager(self.config_file)
else:
raise AnsibleOptionsError('The provided configuration file is missing or not accessible: %s' % to_native(self.config_file))
else:
self.config = C.config
self.config_file = self.config._config_file
if self.config_file:
try:
if not os.path.exists(self.config_file):
raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
elif not os.path.isfile(self.config_file):
raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
except Exception:
if context.CLIARGS['action'] in ['view']:
raise
elif context.CLIARGS['action'] in ['edit', 'update']:
display.warning("File does not exist, used empty file: %s" % self.config_file)
elif context.CLIARGS['action'] == 'view':
raise AnsibleError('Invalid or no config file was supplied')
# run the requested action
context.CLIARGS['func']()
def execute_update(self):
'''
Updates a single setting in the specified ansible.cfg
'''
raise AnsibleError("Option not implemented yet")
# pylint: disable=unreachable
if context.CLIARGS['setting'] is None:
raise AnsibleOptionsError("update option requires a setting to update")
(entry, value) = context.CLIARGS['setting'].split('=')
if '.' in entry:
(section, option) = entry.split('.')
else:
section = 'defaults'
option = entry
subprocess.call([
'ansible',
'-m', 'ini_file',
'localhost',
'-c', 'local',
'-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
])
def execute_view(self):
'''
Displays the current config file
'''
try:
with open(self.config_file, 'rb') as f:
self.pager(to_text(f.read(), errors='surrogate_or_strict'))
except Exception as e:
raise AnsibleError("Failed to open config file: %s" % to_native(e))
def execute_edit(self):
'''
Opens ansible.cfg in the default EDITOR
'''
raise AnsibleError("Option not implemented yet")
# pylint: disable=unreachable
try:
editor = shlex.split(os.environ.get('EDITOR', 'vi'))
editor.append(self.config_file)
subprocess.call(editor)
except Exception as e:
raise AnsibleError("Failed to open editor: %s" % to_native(e))
def _list_plugin_settings(self, ptype, plugins=None):
entries = {}
loader = getattr(plugin_loader, '%s_loader' % ptype)
# build list
if plugins:
plugin_cs = []
for plugin in plugins:
p = loader.get(plugin, class_only=True)
if p is None:
display.warning("Skipping %s as we could not find matching plugin" % plugin)
else:
plugin_cs.append(p)
else:
plugin_cs = loader.all(class_only=True)
# iterate over class instances
for plugin in plugin_cs:
finalname = name = plugin._load_name
if name.startswith('_'):
# alias or deprecated
if os.path.islink(plugin._original_path):
continue
else:
finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
entries[finalname] = self.config.get_configuration_definitions(ptype, name)
return entries
def _list_entries_from_args(self):
'''
build a dict with the list requested configs
'''
config_entries = {}
if context.CLIARGS['type'] in ('base', 'all'):
# this dumps main/common configs
config_entries = self.config.get_configuration_definitions(ignore_private=True)
if context.CLIARGS['type'] != 'base':
config_entries['PLUGINS'] = {}
if context.CLIARGS['type'] == 'all':
# now each plugin type
for ptype in C.CONFIGURABLE_PLUGINS:
config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype)
elif context.CLIARGS['type'] != 'base':
config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args'])
return config_entries
def execute_list(self):
'''
list and output available configs
'''
config_entries = self._list_entries_from_args()
if context.CLIARGS['format'] == 'yaml':
output = yaml_dump(config_entries)
elif context.CLIARGS['format'] == 'json':
output = json_dump(config_entries)
self.pager(to_text(output, errors='surrogate_or_strict'))
def _get_settings_vars(self, settings, subkey):
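        '''Build sample 'env' or 'vars' lines, one commented entry per documented setting.'''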
data = []
if context.CLIARGS['commented']:
prefix = '#'
else:
prefix = ''
for setting in settings:
if not settings[setting].get('description'):
continue
default = settings[setting].get('default', '')
if subkey == 'env':
stype = settings[setting].get('type', '')
if stype == 'boolean':
if default:
default = '1'
else:
default = '0'
elif default:
if stype == 'list':
if not isinstance(default, string_types):
# python lists are not valid env ones
try:
default = ', '.join(default)
except Exception as e:
# list of other stuff
default = '%s' % to_native(default)
if isinstance(default, string_types) and not is_quoted(default):
default = shlex.quote(default)
elif default is None:
default = ''
if subkey in settings[setting] and settings[setting][subkey]:
entry = settings[setting][subkey][-1]['name']
if isinstance(settings[setting]['description'], string_types):
desc = settings[setting]['description']
else:
desc = '\n#'.join(settings[setting]['description'])
name = settings[setting].get('name', setting)
data.append('# %s(%s): %s' % (name, settings[setting].get('type', 'string'), desc))
# TODO: might need quoting and value coercion depending on type
if subkey == 'env':
data.append('%s%s=%s' % (prefix, entry, default))
elif subkey == 'vars':
data.append(prefix + to_text(yaml_dump({entry: default}, default_flow_style=False), errors='surrogate_or_strict'))
data.append('')
return data
def _get_settings_ini(self, settings):
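        '''Group settings by ini section, rendering each as a description comment plus a sample key=value line.'''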
sections = {}
for o in sorted(settings.keys()):
opt = settings[o]
if not isinstance(opt, Mapping):
                # recursed into one of the few settings that is a mapping, now hitting its strings
continue
if not opt.get('description'):
                # it's a plugin
new_sections = self._get_settings_ini(opt)
for s in new_sections:
if s in sections:
sections[s].extend(new_sections[s])
else:
sections[s] = new_sections[s]
continue
if isinstance(opt['description'], string_types):
desc = '# (%s) %s' % (opt.get('type', 'string'), opt['description'])
else:
desc = "# (%s) " % opt.get('type', 'string')
desc += "\n# ".join(opt['description'])
if 'ini' in opt and opt['ini']:
entry = opt['ini'][-1]
if entry['section'] not in sections:
sections[entry['section']] = []
default = opt.get('default', '')
if opt.get('type', '') == 'list' and not isinstance(default, string_types):
# python lists are not valid ini ones
default = ', '.join(default)
elif default is None:
default = ''
if context.CLIARGS['commented']:
entry['key'] = ';%s' % entry['key']
key = desc + '\n%s=%s' % (entry['key'], default)
sections[entry['section']].append(key)
return sections
def execute_init(self):
data = []
config_entries = self._list_entries_from_args()
plugin_types = config_entries.pop('PLUGINS', None)
if context.CLIARGS['format'] == 'ini':
sections = self._get_settings_ini(config_entries)
if plugin_types:
for ptype in plugin_types:
plugin_sections = self._get_settings_ini(plugin_types[ptype])
for s in plugin_sections:
if s in sections:
sections[s].extend(plugin_sections[s])
else:
sections[s] = plugin_sections[s]
if sections:
for section in sections.keys():
data.append('[%s]' % section)
for key in sections[section]:
data.append(key)
data.append('')
data.append('')
elif context.CLIARGS['format'] in ('env', 'vars'): # TODO: add yaml once that config option is added
data = self._get_settings_vars(config_entries, context.CLIARGS['format'])
if plugin_types:
for ptype in plugin_types:
for plugin in plugin_types[ptype].keys():
data.extend(self._get_settings_vars(plugin_types[ptype][plugin], context.CLIARGS['format']))
self.pager(to_text('\n'.join(data), errors='surrogate_or_strict'))
def _render_settings(self, config):
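        '''Render settings for output; in 'display' format the origin is color coded (green default, red required, yellow changed).'''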
entries = []
for setting in sorted(config):
changed = (config[setting].origin not in ('default', 'REQUIRED'))
if context.CLIARGS['format'] == 'display':
if isinstance(config[setting], Setting):
# proceed normally
if config[setting].origin == 'default':
color = 'green'
elif config[setting].origin == 'REQUIRED':
# should include '_terms', '_input', etc
color = 'red'
else:
color = 'yellow'
msg = "%s(%s) = %s" % (setting, config[setting].origin, config[setting].value)
else:
color = 'green'
msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default'))
entry = stringc(msg, color)
else:
entry = {}
for key in config[setting]._fields:
entry[key] = getattr(config[setting], key)
if not context.CLIARGS['only_changed'] or changed:
entries.append(entry)
return entries
def _get_global_configs(self):
config = self.config.get_configuration_definitions(ignore_private=True).copy()
for setting in config.keys():
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
config[setting] = Setting(setting, v, o, None)
return self._render_settings(config)
def _get_plugin_configs(self, ptype, plugins):
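        '''Load each requested plugin of the given type, resolve its settings to values and origins, and render them.'''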
# prep loading
loader = getattr(plugin_loader, '%s_loader' % ptype)
        # accumulators
output = []
config_entries = {}
# build list
if plugins:
plugin_cs = []
for plugin in plugins:
p = loader.get(plugin, class_only=True)
if p is None:
display.warning("Skipping %s as we could not find matching plugin" % plugin)
else:
plugin_cs.append(loader.get(plugin, class_only=True))
else:
plugin_cs = loader.all(class_only=True)
for plugin in plugin_cs:
            # in case of deprecation they diverge
finalname = name = plugin._load_name
if name.startswith('_'):
if os.path.islink(plugin._original_path):
# skip alias
continue
# deprecated, but use 'nice name'
finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
# default entries per plugin
config_entries[finalname] = self.config.get_configuration_definitions(ptype, name)
try:
# populate config entries by loading plugin
dump = loader.get(name, class_only=True)
except Exception as e:
display.warning('Skipping "%s" %s plugin, as we cannot load plugin to check config due to : %s' % (name, ptype, to_native(e)))
continue
# actually get the values
for setting in config_entries[finalname].keys():
try:
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants())
except AnsibleError as e:
if to_text(e).startswith('No setting was provided for required configuration'):
v = None
o = 'REQUIRED'
else:
raise e
if v is None and o is None:
                    # not all cases will be an error
o = 'REQUIRED'
config_entries[finalname][setting] = Setting(setting, v, o, None)
# pretty please!
results = self._render_settings(config_entries[finalname])
if results:
if context.CLIARGS['format'] == 'display':
# avoid header for empty lists (only changed!)
output.append('\n%s:\n%s' % (finalname, '_' * len(finalname)))
output.extend(results)
else:
output.append({finalname: results})
return output
def execute_dump(self):
'''
Shows the current settings, merges ansible.cfg if specified
'''
if context.CLIARGS['type'] == 'base':
# deal with base
output = self._get_global_configs()
elif context.CLIARGS['type'] == 'all':
# deal with base
output = self._get_global_configs()
# deal with plugins
for ptype in C.CONFIGURABLE_PLUGINS:
plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
if not context.CLIARGS['only_changed'] or plugin_list:
output.append('\n%s:\n%s' % (ptype.upper(), '=' * len(ptype)))
output.extend(plugin_list)
else:
if ptype in ('modules', 'doc_fragments'):
pname = ptype.upper()
else:
pname = '%s_PLUGINS' % ptype.upper()
output.append({pname: plugin_list})
else:
# deal with plugins
output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
text = '\n'.join(output)
if context.CLIARGS['format'] == 'yaml':
text = yaml_dump(output)
elif context.CLIARGS['format'] == 'json':
text = json_dump(output)
self.pager(to_text(text, errors='surrogate_or_strict'))
def main(args=None):
ConfigCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,493 |
Unable to set `null_representation` config
|
### Summary
Trying to set the `null_representation` setting (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-null-representation) results in something like:
```text
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/base.yml#L952 specifies `type: none`
and https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/manager.py#L99 enforces this type, but this wasn't the case in earlier commits, as I understand it.
Some history I could find:
* https://github.com/ansible/ansible/commit/892e230514090dc9221ee01d289c3532aa6ef260
* https://github.com/ansible/ansible/commit/9f6bbf8c2f02cdc70df149095a6623ee0c2c7ba7
* https://github.com/ansible/ansible/commit/74842adc07edb248f9b544389ce4093b9149f195
Also, could this evolve so that the config value could be overridden by local variables and/or by '#jinja2:' (by setting this value in the Jinja environment in this case), and not just globally (https://github.com/ansible/ansible/blob/v2.11.6/lib/ansible/template/__init__.py#L809)?
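
For reference, a minimal sketch of why the value is rejected (hand-reduced from the `ensure_type` code linked above; the function body here is illustrative, not the exact source):

```python
# Hand-reduced sketch of the 'none' branch of ensure_type() in ansible/config/manager.py.
# An ini value always arrives as a string, so only the literal string 'None' (or an
# unset value) survives the check; anything else, including 'null', raises.
def ensure_type(value, value_type):
    if value_type == 'none':
        if value == "None":
            value = None  # only the literal string 'None' is coerced
        if value is not None:
            raise ValueError('Invalid type provided for "None": "%s"' % value)
    return value

print(ensure_type("None", "none"))  # -> None, accepted
try:
    ensure_type("null", "none")
except ValueError as e:
    print(e)  # Invalid type provided for "None": "null", matching the traceback below
```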
### Issue Type
Bug Report
### Component Name
config
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
python version = 3.8
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
CentOS 7
### Steps to Reproduce
* Create ansible.cfg with this content:
```ini
[defaults]
null_representation = null
```
* Run `ansible-config dump --only-changed`
### Expected Results
Something like:
```text
$ ansible-config dump --only-changed
DEFAULT_NULL_REPRESENTATION(/my/ansible.cfg) = null
```
### Actual Results
```console
Unhandled error:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/bin/ansible-config", line 63, in <module>
import ansible.constants as C
File "/my/venv/lib/python3.8/site-packages/ansible/constants.py", line 181, in <module>
config = ConfigManager()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 310, in __init__
self.update_config_data()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 617, in update_config_data
raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76493
|
https://github.com/ansible/ansible/pull/78451
|
3a59cb25f486eb6c633995f5fe01413c0ed42116
|
0de44804679461b8d898129068183d6da416e3a7
| 2021-12-07T18:08:25Z |
python
| 2022-08-11T20:07:57Z |
changelogs/fragments/null_means_none.yml
| |
lib/ansible/config/base.yml
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
ANSIBLE_HOME:
name: The Ansible home path
description:
- The default root path for Ansible config files on the controller.
default: ~/.ansible
env:
- name: ANSIBLE_HOME
ini:
- key: home
section: defaults
type: path
version_added: '2.14'
ANSIBLE_CONNECTION_PATH:
name: Path of ansible-connection script
default: null
description:
- Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
- If null, ansible will start with the same directory as the ansible script.
type: path
env: [{name: ANSIBLE_CONNECTION_PATH}]
ini:
- {key: ansible_connection_path, section: persistent_connection}
yaml: {key: persistent_connection.ansible_connection_path}
version_added: "2.8"
ANSIBLE_COW_SELECTION:
name: Cowsay filter selection
default: default
  description: This allows you to choose a specific cowsay stencil for the banners or use 'random' to cycle through them.
env: [{name: ANSIBLE_COW_SELECTION}]
ini:
- {key: cow_selection, section: defaults}
ANSIBLE_COW_ACCEPTLIST:
name: Cowsay filter acceptance list
default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www']
description: Accept list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates.
env:
- name: ANSIBLE_COW_WHITELIST
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'ANSIBLE_COW_ACCEPTLIST'
- name: ANSIBLE_COW_ACCEPTLIST
version_added: '2.11'
ini:
- key: cow_whitelist
section: defaults
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'cowsay_enabled_stencils'
- key: cowsay_enabled_stencils
section: defaults
version_added: '2.11'
type: list
ANSIBLE_FORCE_COLOR:
name: Force color output
default: False
description: This option forces color mode even when running without a TTY or the "nocolor" setting is True.
env: [{name: ANSIBLE_FORCE_COLOR}]
ini:
- {key: force_color, section: defaults}
type: boolean
yaml: {key: display.force_color}
ANSIBLE_NOCOLOR:
name: Suppress color output
default: False
description: This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
env:
- name: ANSIBLE_NOCOLOR
# this is generic convention for CLI programs
- name: NO_COLOR
version_added: '2.11'
ini:
- {key: nocolor, section: defaults}
type: boolean
yaml: {key: display.nocolor}
ANSIBLE_NOCOWS:
name: Suppress cowsay output
default: False
description: If you have cowsay installed but want to avoid the 'cows' (why????), use this.
env: [{name: ANSIBLE_NOCOWS}]
ini:
- {key: nocows, section: defaults}
type: boolean
yaml: {key: display.i_am_no_fun}
ANSIBLE_COW_PATH:
name: Set path to cowsay command
default: null
description: Specify a custom cowsay path or swap in your cowsay implementation of choice
env: [{name: ANSIBLE_COW_PATH}]
ini:
- {key: cowpath, section: defaults}
type: string
yaml: {key: display.cowpath}
ANSIBLE_PIPELINING:
name: Connection pipelining
default: False
description:
- This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
- Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- It can result in a very significant performance improvement when enabled.
- "However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first
disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default."
- This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
env:
- name: ANSIBLE_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
type: boolean
ANY_ERRORS_FATAL:
name: Make Task failures fatal
default: False
description: Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors.
env:
- name: ANSIBLE_ANY_ERRORS_FATAL
ini:
- section: defaults
key: any_errors_fatal
type: boolean
yaml: {key: errors.any_task_errors_fatal}
version_added: "2.4"
BECOME_ALLOW_SAME_USER:
name: Allow becoming the same user
default: False
description:
    - This setting controls if become is skipped when the remote user and become user are the same, i.e. root sudo to root.
env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}]
ini:
- {key: become_allow_same_user, section: privilege_escalation}
type: boolean
yaml: {key: privilege_escalation.become_allow_same_user}
BECOME_PASSWORD_FILE:
name: Become password file
default: ~
description:
- 'The password file to use for the become plugin. --become-password-file.'
- If executable, it will be run and the resulting stdout will be used as the password.
env: [{name: ANSIBLE_BECOME_PASSWORD_FILE}]
ini:
- {key: become_password_file, section: defaults}
type: path
version_added: '2.12'
AGNOSTIC_BECOME_PROMPT:
name: Display an agnostic become prompt
default: True
type: boolean
description: Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method
env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}]
ini:
- {key: agnostic_become_prompt, section: privilege_escalation}
yaml: {key: privilege_escalation.agnostic_become_prompt}
version_added: "2.5"
CACHE_PLUGIN:
name: Persistent Cache plugin
default: memory
description: Chooses which cache plugin to use, the default 'memory' is ephemeral.
env: [{name: ANSIBLE_CACHE_PLUGIN}]
ini:
- {key: fact_caching, section: defaults}
yaml: {key: facts.cache.plugin}
CACHE_PLUGIN_CONNECTION:
name: Cache Plugin URI
default: ~
description: Defines connection or path information for the cache plugin
env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}]
ini:
- {key: fact_caching_connection, section: defaults}
yaml: {key: facts.cache.uri}
CACHE_PLUGIN_PREFIX:
name: Cache Plugin table prefix
default: ansible_facts
description: Prefix to use for cache plugin files/tables
env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}]
ini:
- {key: fact_caching_prefix, section: defaults}
yaml: {key: facts.cache.prefix}
CACHE_PLUGIN_TIMEOUT:
name: Cache Plugin expiration timeout
default: 86400
description: Expiration timeout for the cache plugin data
env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}]
ini:
- {key: fact_caching_timeout, section: defaults}
type: integer
yaml: {key: facts.cache.timeout}
COLLECTIONS_SCAN_SYS_PATH:
name: Scan PYTHONPATH for installed collections
description: A boolean to enable or disable scanning the sys.path for installed collections
default: true
type: boolean
env:
- {name: ANSIBLE_COLLECTIONS_SCAN_SYS_PATH}
ini:
- {key: collections_scan_sys_path, section: defaults}
COLLECTIONS_PATHS:
name: ordered list of root paths for loading installed Ansible collections content
description: >
Colon separated paths in which Ansible will search for collections content.
Collections must be in nested *subdirectories*, not directly in these directories.
For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``,
and you want to add ``my.collection`` to that directory, it must be saved as
    ``'{{ ANSIBLE_HOME ~ "/collections/ansible_collections/my/collection" }}'``.
default: '{{ ANSIBLE_HOME ~ "/collections:/usr/share/ansible/collections" }}'
type: pathspec
env:
- name: ANSIBLE_COLLECTIONS_PATHS # TODO: Deprecate this and ini once PATH has been in a few releases.
- name: ANSIBLE_COLLECTIONS_PATH
version_added: '2.10'
ini:
- key: collections_paths
section: defaults
- key: collections_path
section: defaults
version_added: '2.10'
COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
name: Defines behavior when loading a collection that does not support the current Ansible version
description:
- When a collection is loaded that does not support the running Ansible version (with the collection metadata key `requires_ansible`).
env: [{name: ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH}]
ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
choices: &basic_error
error: issue a 'fatal' error and stop the play
warning: issue a warning but continue
ignore: just continue silently
default: warning
_COLOR_DEFAULTS: &color
name: placeholder for color settings' defaults
choices: ['black', 'bright gray', 'blue', 'white', 'green', 'bright blue', 'cyan', 'bright green', 'red', 'bright cyan', 'purple', 'bright red', 'yellow', 'bright purple', 'dark gray', 'bright yellow', 'magenta', 'bright magenta', 'normal']
COLOR_CHANGED:
<<: *color
name: Color for 'changed' task status
default: yellow
description: Defines the color to use on 'Changed' task status
env: [{name: ANSIBLE_COLOR_CHANGED}]
ini:
- {key: changed, section: colors}
COLOR_CONSOLE_PROMPT:
<<: *color
name: "Color for ansible-console's prompt task status"
default: white
description: Defines the default color to use for ansible-console
env: [{name: ANSIBLE_COLOR_CONSOLE_PROMPT}]
ini:
- {key: console_prompt, section: colors}
version_added: "2.7"
COLOR_DEBUG:
<<: *color
name: Color for debug statements
default: dark gray
description: Defines the color to use when emitting debug messages
env: [{name: ANSIBLE_COLOR_DEBUG}]
ini:
- {key: debug, section: colors}
COLOR_DEPRECATE:
<<: *color
name: Color for deprecation messages
default: purple
description: Defines the color to use when emitting deprecation messages
env: [{name: ANSIBLE_COLOR_DEPRECATE}]
ini:
- {key: deprecate, section: colors}
COLOR_DIFF_ADD:
<<: *color
name: Color for diff added display
default: green
description: Defines the color to use when showing added lines in diffs
env: [{name: ANSIBLE_COLOR_DIFF_ADD}]
ini:
- {key: diff_add, section: colors}
yaml: {key: display.colors.diff.add}
COLOR_DIFF_LINES:
<<: *color
name: Color for diff lines display
default: cyan
description: Defines the color to use when showing diffs
env: [{name: ANSIBLE_COLOR_DIFF_LINES}]
ini:
- {key: diff_lines, section: colors}
COLOR_DIFF_REMOVE:
<<: *color
name: Color for diff removed display
default: red
description: Defines the color to use when showing removed lines in diffs
env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}]
ini:
- {key: diff_remove, section: colors}
COLOR_ERROR:
<<: *color
name: Color for error messages
default: red
description: Defines the color to use when emitting error messages
env: [{name: ANSIBLE_COLOR_ERROR}]
ini:
- {key: error, section: colors}
yaml: {key: colors.error}
COLOR_HIGHLIGHT:
<<: *color
name: Color for highlighting
default: white
description: Defines the color to use for highlighting
env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
ini:
- {key: highlight, section: colors}
COLOR_OK:
<<: *color
name: Color for 'ok' task status
default: green
description: Defines the color to use when showing 'OK' task status
env: [{name: ANSIBLE_COLOR_OK}]
ini:
- {key: ok, section: colors}
COLOR_SKIP:
<<: *color
name: Color for 'skip' task status
default: cyan
description: Defines the color to use when showing 'Skipped' task status
env: [{name: ANSIBLE_COLOR_SKIP}]
ini:
- {key: skip, section: colors}
COLOR_UNREACHABLE:
<<: *color
name: Color for 'unreachable' host state
default: bright red
description: Defines the color to use on 'Unreachable' status
env: [{name: ANSIBLE_COLOR_UNREACHABLE}]
ini:
- {key: unreachable, section: colors}
COLOR_VERBOSE:
<<: *color
name: Color for verbose messages
default: blue
  description: Defines the color to use when emitting verbose messages, i.e. those that show with '-v's.
env: [{name: ANSIBLE_COLOR_VERBOSE}]
ini:
- {key: verbose, section: colors}
COLOR_WARN:
<<: *color
name: Color for warning messages
default: bright purple
description: Defines the color to use when emitting warning messages
env: [{name: ANSIBLE_COLOR_WARN}]
ini:
- {key: warn, section: colors}
CONNECTION_PASSWORD_FILE:
name: Connection password file
default: ~
description: 'The password file to use for the connection plugin. --connection-password-file.'
env: [{name: ANSIBLE_CONNECTION_PASSWORD_FILE}]
ini:
- {key: connection_password_file, section: defaults}
type: path
version_added: '2.12'
COVERAGE_REMOTE_OUTPUT:
name: Sets the output directory and filename prefix to generate coverage run info.
description:
- Sets the output directory on the remote host to generate coverage reports to.
- Currently only used for remote coverage on PowerShell modules.
- This is for internal use only.
env:
- {name: _ANSIBLE_COVERAGE_REMOTE_OUTPUT}
vars:
- {name: _ansible_coverage_remote_output}
type: str
version_added: '2.9'
COVERAGE_REMOTE_PATHS:
name: Sets the list of paths to run coverage for.
description:
- A list of paths for files on the Ansible controller to run coverage for when executing on the remote host.
- Only files that match the path glob will have its coverage collected.
- Multiple path globs can be specified and are separated by ``:``.
- Currently only used for remote coverage on PowerShell modules.
- This is for internal use only.
default: '*'
env:
- {name: _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER}
type: str
version_added: '2.9'
ACTION_WARNINGS:
name: Toggle action warnings
default: True
description:
- By default Ansible will issue a warning when received from a task action (module or action plugin)
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_ACTION_WARNINGS}]
ini:
- {key: action_warnings, section: defaults}
type: boolean
version_added: "2.5"
LOCALHOST_WARNING:
name: Warning when using implicit inventory with only localhost
default: True
description:
- By default Ansible will issue a warning when there are no hosts in the
inventory.
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_LOCALHOST_WARNING}]
ini:
- {key: localhost_warning, section: defaults}
type: boolean
version_added: "2.6"
INVENTORY_UNPARSED_WARNING:
name: Warning when no inventory files can be parsed, resulting in an implicit inventory with only localhost
default: True
description:
- By default Ansible will issue a warning when no inventory was loaded and notes that
it will use an implicit localhost-only inventory.
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_INVENTORY_UNPARSED_WARNING}]
ini:
- {key: inventory_unparsed_warning, section: inventory}
type: boolean
version_added: "2.14"
DOC_FRAGMENT_PLUGIN_PATH:
name: documentation fragment plugins path
default: '{{ ANSIBLE_HOME ~ "/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments" }}'
description: Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
env: [{name: ANSIBLE_DOC_FRAGMENT_PLUGINS}]
ini:
- {key: doc_fragment_plugins, section: defaults}
type: pathspec
DEFAULT_ACTION_PLUGIN_PATH:
name: Action plugins path
default: '{{ ANSIBLE_HOME ~ "/plugins/action:/usr/share/ansible/plugins/action" }}'
description: Colon separated paths in which Ansible will search for Action Plugins.
env: [{name: ANSIBLE_ACTION_PLUGINS}]
ini:
- {key: action_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.action.path}
DEFAULT_ALLOW_UNSAFE_LOOKUPS:
name: Allow unsafe lookups
default: False
description:
- "When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo)
to return data that is not marked 'unsafe'."
- By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language,
as this could represent a security risk. This option is provided to allow for backward compatibility,
however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run
through the templating engine late
env: []
ini:
- {key: allow_unsafe_lookups, section: defaults}
type: boolean
version_added: "2.2.3"
DEFAULT_ASK_PASS:
name: Ask for the login password
default: False
description:
- This controls whether an Ansible playbook should prompt for a login password.
If using SSH keys for authentication, you probably do not need to change this setting.
env: [{name: ANSIBLE_ASK_PASS}]
ini:
- {key: ask_pass, section: defaults}
type: boolean
yaml: {key: defaults.ask_pass}
DEFAULT_ASK_VAULT_PASS:
name: Ask for the vault password(s)
default: False
description:
- This controls whether an Ansible playbook should prompt for a vault password.
env: [{name: ANSIBLE_ASK_VAULT_PASS}]
ini:
- {key: ask_vault_pass, section: defaults}
type: boolean
DEFAULT_BECOME:
name: Enable privilege escalation (become)
default: False
description: Toggles the use of privilege escalation, allowing you to 'become' another user after login.
env: [{name: ANSIBLE_BECOME}]
ini:
- {key: become, section: privilege_escalation}
type: boolean
DEFAULT_BECOME_ASK_PASS:
name: Ask for the privilege escalation (become) password
default: False
description: Toggle to prompt for privilege escalation password.
env: [{name: ANSIBLE_BECOME_ASK_PASS}]
ini:
- {key: become_ask_pass, section: privilege_escalation}
type: boolean
DEFAULT_BECOME_METHOD:
name: Choose privilege escalation method
default: 'sudo'
description: Privilege escalation method to use when `become` is enabled.
env: [{name: ANSIBLE_BECOME_METHOD}]
ini:
- {section: privilege_escalation, key: become_method}
DEFAULT_BECOME_EXE:
name: Choose 'become' executable
default: ~
description: 'executable to use for privilege escalation, otherwise Ansible will depend on PATH'
env: [{name: ANSIBLE_BECOME_EXE}]
ini:
- {key: become_exe, section: privilege_escalation}
DEFAULT_BECOME_FLAGS:
name: Set 'become' executable options
default: ~
description: Flags to pass to the privilege escalation executable.
env: [{name: ANSIBLE_BECOME_FLAGS}]
ini:
- {key: become_flags, section: privilege_escalation}
BECOME_PLUGIN_PATH:
name: Become plugins path
default: '{{ ANSIBLE_HOME ~ "/plugins/become:/usr/share/ansible/plugins/become" }}'
description: Colon separated paths in which Ansible will search for Become Plugins.
env: [{name: ANSIBLE_BECOME_PLUGINS}]
ini:
- {key: become_plugins, section: defaults}
type: pathspec
version_added: "2.8"
DEFAULT_BECOME_USER:
# FIXME: should really be blank and make -u passing optional depending on it
name: Set the user you 'become' via privilege escalation
default: root
description: The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
env: [{name: ANSIBLE_BECOME_USER}]
ini:
- {key: become_user, section: privilege_escalation}
yaml: {key: become.user}
DEFAULT_CACHE_PLUGIN_PATH:
name: Cache Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/cache:/usr/share/ansible/plugins/cache" }}'
description: Colon separated paths in which Ansible will search for Cache Plugins.
env: [{name: ANSIBLE_CACHE_PLUGINS}]
ini:
- {key: cache_plugins, section: defaults}
type: pathspec
DEFAULT_CALLBACK_PLUGIN_PATH:
name: Callback Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/callback:/usr/share/ansible/plugins/callback" }}'
description: Colon separated paths in which Ansible will search for Callback Plugins.
env: [{name: ANSIBLE_CALLBACK_PLUGINS}]
ini:
- {key: callback_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.callback.path}
CALLBACKS_ENABLED:
name: Enable callback plugins that require it.
default: []
description:
- "List of enabled callbacks, not all callbacks need enabling,
but many of those shipped with Ansible do as we don't want them activated by default."
env:
- name: ANSIBLE_CALLBACK_WHITELIST
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'ANSIBLE_CALLBACKS_ENABLED'
- name: ANSIBLE_CALLBACKS_ENABLED
version_added: '2.11'
ini:
- key: callback_whitelist
section: defaults
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'callbacks_enabled'
- key: callbacks_enabled
section: defaults
version_added: '2.11'
type: list
DEFAULT_CLICONF_PLUGIN_PATH:
name: Cliconf Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/cliconf:/usr/share/ansible/plugins/cliconf" }}'
description: Colon separated paths in which Ansible will search for Cliconf Plugins.
env: [{name: ANSIBLE_CLICONF_PLUGINS}]
ini:
- {key: cliconf_plugins, section: defaults}
type: pathspec
DEFAULT_CONNECTION_PLUGIN_PATH:
name: Connection Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/connection:/usr/share/ansible/plugins/connection" }}'
description: Colon separated paths in which Ansible will search for Connection Plugins.
env: [{name: ANSIBLE_CONNECTION_PLUGINS}]
ini:
- {key: connection_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.connection.path}
DEFAULT_DEBUG:
name: Debug mode
default: False
description:
- "Toggles debug output in Ansible. This is *very* verbose and can hinder
multiprocessing. Debug output can also include secret information
despite no_log settings being enabled, which means debug mode should not be used in
production."
env: [{name: ANSIBLE_DEBUG}]
ini:
- {key: debug, section: defaults}
type: boolean
DEFAULT_EXECUTABLE:
name: Target shell executable
default: /bin/sh
description:
- "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target.
Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is."
env: [{name: ANSIBLE_EXECUTABLE}]
ini:
- {key: executable, section: defaults}
DEFAULT_FACT_PATH:
name: local fact path
description:
- "This option allows you to globally configure a custom path for 'local_facts' for the implied :ref:`ansible_collections.ansible.builtin.setup_module` task when using fact gathering."
- "If not set, it will fallback to the default from the ``ansible.builtin.setup`` module: ``/etc/ansible/facts.d``."
- "This does **not** affect user defined tasks that use the ``ansible.builtin.setup`` module."
- The real action being created by the implicit task is currently ``ansible.legacy.gather_facts`` module, which then calls the configured fact modules,
by default this will be ``ansible.builtin.setup`` for POSIX systems but other platforms might have different defaults.
env: [{name: ANSIBLE_FACT_PATH}]
ini:
- {key: fact_path, section: defaults}
type: string
deprecated:
# TODO: when removing set playbook/play.py to default=None
why: the module_defaults keyword is a more generic version and can apply to all calls to the
M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
version: "2.18"
alternatives: module_defaults
DEFAULT_FILTER_PLUGIN_PATH:
name: Jinja2 Filter Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/filter:/usr/share/ansible/plugins/filter" }}'
description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
env: [{name: ANSIBLE_FILTER_PLUGINS}]
ini:
- {key: filter_plugins, section: defaults}
type: pathspec
DEFAULT_FORCE_HANDLERS:
name: Force handlers to run after failure
default: False
description:
- This option controls if notified handlers run on a host even if a failure occurs on that host.
- When false, the handlers will not run if a failure has occurred on a host.
- This can also be set per play or on the command line. See Handlers and Failure for more details.
env: [{name: ANSIBLE_FORCE_HANDLERS}]
ini:
- {key: force_handlers, section: defaults}
type: boolean
version_added: "1.9.1"
DEFAULT_FORKS:
name: Number of task forks
default: 5
description: Maximum number of forks Ansible will use to execute tasks on target hosts.
env: [{name: ANSIBLE_FORKS}]
ini:
- {key: forks, section: defaults}
type: integer
DEFAULT_GATHERING:
name: Gathering behaviour
default: 'implicit'
description:
- This setting controls the default policy of fact gathering (facts discovered about remote systems).
- "This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin."
env: [{name: ANSIBLE_GATHERING}]
ini:
- key: gathering
section: defaults
version_added: "1.6"
choices:
implicit: "the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set."
explicit: facts will not be gathered unless directly requested in the play.
smart: each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the run.
DEFAULT_GATHER_SUBSET:
name: Gather facts subset
description:
- Set the `gather_subset` option for the :ref:`ansible_collections.ansible.builtin.setup_module` task in the implicit fact gathering.
See the module documentation for specifics.
- "It does **not** apply to user defined ``ansible.builtin.setup`` tasks."
env: [{name: ANSIBLE_GATHER_SUBSET}]
ini:
- key: gather_subset
section: defaults
version_added: "2.1"
type: list
deprecated:
# TODO: when removing set playbook/play.py to default=None
why: the module_defaults keyword is a more generic version and can apply to all calls to the
M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
version: "2.18"
alternatives: module_defaults
DEFAULT_GATHER_TIMEOUT:
name: Gather facts timeout
description:
- Set the timeout in seconds for the implicit fact gathering, see the module documentation for specifics.
- "It does **not** apply to user defined :ref:`ansible_collections.ansible.builtin.setup_module` tasks."
env: [{name: ANSIBLE_GATHER_TIMEOUT}]
ini:
- {key: gather_timeout, section: defaults}
type: integer
deprecated:
# TODO: when removing set playbook/play.py to default=None
why: the module_defaults keyword is a more generic version and can apply to all calls to the
M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
version: "2.18"
alternatives: module_defaults
DEFAULT_HASH_BEHAVIOUR:
name: Hash merge behaviour
default: replace
type: string
choices:
replace: Any variable that is defined more than once is overwritten using the order from variable precedence rules (highest wins).
merge: Any dictionary variable will be recursively merged with new definitions across the different variable definition sources.
description:
- This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
- This does not affect variables whose values are scalars (integers, strings) or arrays.
- "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable,
leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it."
- We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups
to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much
complexity has been introduced into the data structures and plays.
- For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars``
that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope,
but the setting itself affects all sources and makes debugging even harder.
- All playbooks and roles in the official examples repos assume the default for this setting.
- Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables.
For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
    - The Ansible project recommends you **avoid ``merge`` for new projects**.
    - It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept
      as some users do heavily rely on it.
env: [{name: ANSIBLE_HASH_BEHAVIOUR}]
ini:
- {key: hash_behaviour, section: defaults}
DEFAULT_HOST_LIST:
name: Inventory Source
default: /etc/ansible/hosts
description: Comma separated list of Ansible inventory sources
env:
- name: ANSIBLE_INVENTORY
expand_relative_paths: True
ini:
- key: inventory
section: defaults
type: pathlist
yaml: {key: defaults.inventory}
DEFAULT_HTTPAPI_PLUGIN_PATH:
name: HttpApi Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/httpapi:/usr/share/ansible/plugins/httpapi" }}'
description: Colon separated paths in which Ansible will search for HttpApi Plugins.
env: [{name: ANSIBLE_HTTPAPI_PLUGINS}]
ini:
- {key: httpapi_plugins, section: defaults}
type: pathspec
DEFAULT_INTERNAL_POLL_INTERVAL:
name: Internal poll interval
default: 0.001
env: []
ini:
- {key: internal_poll_interval, section: defaults}
type: float
version_added: "2.2"
description:
- This sets the interval (in seconds) of Ansible internal processes polling each other.
Lower values improve performance with large playbooks at the expense of extra CPU load.
Higher values are more suitable for Ansible usage in automation scenarios,
when UI responsiveness is not required but CPU usage might be a concern.
- "The default corresponds to the value hardcoded in Ansible <= 2.1"
DEFAULT_INVENTORY_PLUGIN_PATH:
name: Inventory Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/inventory:/usr/share/ansible/plugins/inventory" }}'
description: Colon separated paths in which Ansible will search for Inventory Plugins.
env: [{name: ANSIBLE_INVENTORY_PLUGINS}]
ini:
- {key: inventory_plugins, section: defaults}
type: pathspec
DEFAULT_JINJA2_EXTENSIONS:
name: Enabled Jinja2 extensions
default: []
description:
- This is a developer-specific feature that allows enabling additional Jinja2 extensions.
- "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)"
env: [{name: ANSIBLE_JINJA2_EXTENSIONS}]
ini:
- {key: jinja2_extensions, section: defaults}
DEFAULT_JINJA2_NATIVE:
name: Use Jinja2's NativeEnvironment for templating
default: False
description: This option preserves variable types during template operations.
env: [{name: ANSIBLE_JINJA2_NATIVE}]
ini:
- {key: jinja2_native, section: defaults}
type: boolean
yaml: {key: jinja2_native}
version_added: 2.7
DEFAULT_KEEP_REMOTE_FILES:
name: Keep remote files
default: False
description:
- Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote.
- If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
env: [{name: ANSIBLE_KEEP_REMOTE_FILES}]
ini:
- {key: keep_remote_files, section: defaults}
type: boolean
DEFAULT_LIBVIRT_LXC_NOSECLABEL:
# TODO: move to plugin
name: No security label on Lxc
default: False
description:
- "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh.
This is necessary when running on systems which do not have SELinux."
env:
- name: ANSIBLE_LIBVIRT_LXC_NOSECLABEL
ini:
- {key: libvirt_lxc_noseclabel, section: selinux}
type: boolean
version_added: "2.1"
DEFAULT_LOAD_CALLBACK_PLUGINS:
name: Load callbacks for adhoc
default: False
description:
- Controls whether callback plugins are loaded when running /usr/bin/ansible.
This may be used to log activity from the command line, send notifications, and so on.
Callback plugins are always loaded for ``ansible-playbook``.
env: [{name: ANSIBLE_LOAD_CALLBACK_PLUGINS}]
ini:
- {key: bin_ansible_callbacks, section: defaults}
type: boolean
version_added: "1.8"
DEFAULT_LOCAL_TMP:
name: Controller temporary directory
default: '{{ ANSIBLE_HOME ~ "/tmp" }}'
description: Temporary directory for Ansible to use on the controller.
env: [{name: ANSIBLE_LOCAL_TEMP}]
ini:
- {key: local_tmp, section: defaults}
type: tmppath
DEFAULT_LOG_PATH:
name: Ansible log file path
default: ~
description: File to which Ansible will log on the controller. When empty logging is disabled.
env: [{name: ANSIBLE_LOG_PATH}]
ini:
- {key: log_path, section: defaults}
type: path
DEFAULT_LOG_FILTER:
name: Name filters for python logger
default: []
description: List of logger names to filter out of the log file
env: [{name: ANSIBLE_LOG_FILTER}]
ini:
- {key: log_filter, section: defaults}
type: list
DEFAULT_LOOKUP_PLUGIN_PATH:
name: Lookup Plugins Path
description: Colon separated paths in which Ansible will search for Lookup Plugins.
default: '{{ ANSIBLE_HOME ~ "/plugins/lookup:/usr/share/ansible/plugins/lookup" }}'
env: [{name: ANSIBLE_LOOKUP_PLUGINS}]
ini:
- {key: lookup_plugins, section: defaults}
type: pathspec
yaml: {key: defaults.lookup_plugins}
DEFAULT_MANAGED_STR:
name: Ansible managed
default: 'Ansible managed'
description: Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant for those two modules.
env: []
ini:
- {key: ansible_managed, section: defaults}
yaml: {key: defaults.ansible_managed}
DEFAULT_MODULE_ARGS:
name: Adhoc default arguments
default: ~
description:
- This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
env: [{name: ANSIBLE_MODULE_ARGS}]
ini:
- {key: module_args, section: defaults}
DEFAULT_MODULE_COMPRESSION:
name: Python module compression
default: ZIP_DEFLATED
description: Compression scheme to use when transferring Python modules to the target.
env: []
ini:
- {key: module_compression, section: defaults}
# vars:
# - name: ansible_module_compression
DEFAULT_MODULE_NAME:
name: Default adhoc module
default: command
description: "Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``."
env: []
ini:
- {key: module_name, section: defaults}
DEFAULT_MODULE_PATH:
name: Modules Path
description: Colon separated paths in which Ansible will search for Modules.
default: '{{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }}'
env: [{name: ANSIBLE_LIBRARY}]
ini:
- {key: library, section: defaults}
type: pathspec
DEFAULT_MODULE_UTILS_PATH:
name: Module Utils Path
description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
default: '{{ ANSIBLE_HOME ~ "/plugins/module_utils:/usr/share/ansible/plugins/module_utils" }}'
env: [{name: ANSIBLE_MODULE_UTILS}]
ini:
- {key: module_utils, section: defaults}
type: pathspec
DEFAULT_NETCONF_PLUGIN_PATH:
name: Netconf Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/netconf:/usr/share/ansible/plugins/netconf" }}'
description: Colon separated paths in which Ansible will search for Netconf Plugins.
env: [{name: ANSIBLE_NETCONF_PLUGINS}]
ini:
- {key: netconf_plugins, section: defaults}
type: pathspec
DEFAULT_NO_LOG:
name: No log
default: False
description: "Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures."
env: [{name: ANSIBLE_NO_LOG}]
ini:
- {key: no_log, section: defaults}
type: boolean
DEFAULT_NO_TARGET_SYSLOG:
name: No syslog on target
default: False
description:
    - Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will prevent newer
      style PowerShell modules from writing to the event log.
env: [{name: ANSIBLE_NO_TARGET_SYSLOG}]
ini:
- {key: no_target_syslog, section: defaults}
vars:
- name: ansible_no_target_syslog
version_added: '2.10'
type: boolean
yaml: {key: defaults.no_target_syslog}
DEFAULT_NULL_REPRESENTATION:
name: Represent a null
default: ~
description: What templating should return as a 'null' value. When not set it will let Jinja2 decide.
env: [{name: ANSIBLE_NULL_REPRESENTATION}]
ini:
- {key: null_representation, section: defaults}
type: none
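  # NOTE: with this entry typed 'none', only an unset value or the literal string
  # 'None' passes validation; an ini value such as 'null_representation = null'
  # is rejected by ensure_type() -- see the issue above.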
DEFAULT_POLL_INTERVAL:
name: Async poll interval
default: 15
description:
- For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
this is how often to check back on the status of those tasks when an explicit poll interval is not supplied.
The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and
providing a quick turnaround when something may have completed.
env: [{name: ANSIBLE_POLL_INTERVAL}]
ini:
- {key: poll_interval, section: defaults}
type: integer
DEFAULT_PRIVATE_KEY_FILE:
name: Private key file
default: ~
description:
- Option for connections using a certificate or key file to authenticate, rather than an agent or passwords,
you can set the default value here to avoid re-specifying --private-key with every invocation.
env: [{name: ANSIBLE_PRIVATE_KEY_FILE}]
ini:
- {key: private_key_file, section: defaults}
type: path
DEFAULT_PRIVATE_ROLE_VARS:
name: Private role variables
default: False
description:
- Makes role variables inaccessible from other roles.
- This was introduced as a way to reset role variables to default values if
a role is used more than once in a playbook.
env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
ini:
- {key: private_role_vars, section: defaults}
type: boolean
yaml: {key: defaults.private_role_vars}
DEFAULT_REMOTE_PORT:
name: Remote port
default: ~
description: Port to use in remote connections, when blank it will use the connection plugin default.
env: [{name: ANSIBLE_REMOTE_PORT}]
ini:
- {key: remote_port, section: defaults}
type: integer
yaml: {key: defaults.remote_port}
DEFAULT_REMOTE_USER:
name: Login/Remote User
description:
- Sets the login user for the target machines
- "When blank it uses the connection plugin's default, normally the user currently executing Ansible."
env: [{name: ANSIBLE_REMOTE_USER}]
ini:
- {key: remote_user, section: defaults}
DEFAULT_ROLES_PATH:
name: Roles path
default: '{{ ANSIBLE_HOME ~ "/roles:/usr/share/ansible/roles:/etc/ansible/roles" }}'
description: Colon separated paths in which Ansible will search for Roles.
env: [{name: ANSIBLE_ROLES_PATH}]
expand_relative_paths: True
ini:
- {key: roles_path, section: defaults}
type: pathspec
yaml: {key: defaults.roles_path}
DEFAULT_SELINUX_SPECIAL_FS:
name: Problematic file systems
default: fuse, nfs, vboxsf, ramfs, 9p, vfat
description:
- "Some filesystems do not support safe operations and/or return inconsistent errors,
this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors."
- Data corruption may occur and writes are not always verified when a filesystem is in the list.
env:
- name: ANSIBLE_SELINUX_SPECIAL_FS
version_added: "2.9"
ini:
- {key: special_context_filesystems, section: selinux}
type: list
DEFAULT_STDOUT_CALLBACK:
name: Main display callback plugin
default: default
description:
- "Set the main callback used to display Ansible output. You can only have one at a time."
- You can have many other callbacks, but just one can be in charge of stdout.
- See :ref:`callback_plugins` for a list of available options.
env: [{name: ANSIBLE_STDOUT_CALLBACK}]
ini:
- {key: stdout_callback, section: defaults}
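  # Illustrative example (assumes the callback plugin is installed): switch the
  # main display to the community YAML callback via ansible.cfg:
  #   [defaults]
  #   stdout_callback = community.general.yaml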
ENABLE_TASK_DEBUGGER:
name: Whether to enable the task debugger
default: False
description:
- Whether or not to enable the task debugger, this previously was done as a strategy plugin.
- Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
- a task is failed on unreachable. Use the debugger keyword for more flexibility.
type: boolean
env: [{name: ANSIBLE_ENABLE_TASK_DEBUGGER}]
ini:
- {key: enable_task_debugger, section: defaults}
version_added: "2.5"
TASK_DEBUGGER_IGNORE_ERRORS:
name: Whether a failed task with ignore_errors=True will still invoke the debugger
default: True
description:
- This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True
is specified.
- True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
type: boolean
env: [{name: ANSIBLE_TASK_DEBUGGER_IGNORE_ERRORS}]
ini:
- {key: task_debugger_ignore_errors, section: defaults}
version_added: "2.7"
DEFAULT_STRATEGY:
name: Implied strategy
default: 'linear'
description: Set the default strategy used for plays.
env: [{name: ANSIBLE_STRATEGY}]
ini:
- {key: strategy, section: defaults}
version_added: "2.3"
DEFAULT_STRATEGY_PLUGIN_PATH:
name: Strategy Plugins Path
description: Colon separated paths in which Ansible will search for Strategy Plugins.
default: '{{ ANSIBLE_HOME ~ "/plugins/strategy:/usr/share/ansible/plugins/strategy" }}'
env: [{name: ANSIBLE_STRATEGY_PLUGINS}]
ini:
- {key: strategy_plugins, section: defaults}
type: pathspec
DEFAULT_SU:
default: False
description: 'Toggle the use of "su" for tasks.'
env: [{name: ANSIBLE_SU}]
ini:
- {key: su, section: defaults}
type: boolean
yaml: {key: defaults.su}
DEFAULT_SYSLOG_FACILITY:
name: syslog facility
default: LOG_USER
description: Syslog facility to use when Ansible logs to the remote target
env: [{name: ANSIBLE_SYSLOG_FACILITY}]
ini:
- {key: syslog_facility, section: defaults}
DEFAULT_TERMINAL_PLUGIN_PATH:
name: Terminal Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/terminal:/usr/share/ansible/plugins/terminal" }}'
description: Colon separated paths in which Ansible will search for Terminal Plugins.
env: [{name: ANSIBLE_TERMINAL_PLUGINS}]
ini:
- {key: terminal_plugins, section: defaults}
type: pathspec
DEFAULT_TEST_PLUGIN_PATH:
name: Jinja2 Test Plugins Path
description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
default: '{{ ANSIBLE_HOME ~ "/plugins/test:/usr/share/ansible/plugins/test" }}'
env: [{name: ANSIBLE_TEST_PLUGINS}]
ini:
- {key: test_plugins, section: defaults}
type: pathspec
DEFAULT_TIMEOUT:
name: Connection timeout
default: 10
description: This is the default timeout for connection plugins to use.
env: [{name: ANSIBLE_TIMEOUT}]
ini:
- {key: timeout, section: defaults}
type: integer
DEFAULT_TRANSPORT:
# note that ssh_utils refs this and needs to be updated if removed
name: Connection plugin
default: smart
description: "Default connection plugin to use, the 'smart' option will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions"
env: [{name: ANSIBLE_TRANSPORT}]
ini:
- {key: transport, section: defaults}
DEFAULT_UNDEFINED_VAR_BEHAVIOR:
name: Jinja2 fail on undefined
default: True
version_added: "1.3"
description:
- When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
- "Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written."
env: [{name: ANSIBLE_ERROR_ON_UNDEFINED_VARS}]
ini:
- {key: error_on_undefined_vars, section: defaults}
type: boolean
DEFAULT_VARS_PLUGIN_PATH:
name: Vars Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/vars:/usr/share/ansible/plugins/vars" }}'
description: Colon separated paths in which Ansible will search for Vars Plugins.
env: [{name: ANSIBLE_VARS_PLUGINS}]
ini:
- {key: vars_plugins, section: defaults}
type: pathspec
# TODO: unused?
#DEFAULT_VAR_COMPRESSION_LEVEL:
# default: 0
# description: 'TODO: write it'
# env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}]
# ini:
# - {key: var_compression_level, section: defaults}
# type: integer
# yaml: {key: defaults.var_compression_level}
DEFAULT_VAULT_ID_MATCH:
name: Force vault id match
default: False
description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id'
env: [{name: ANSIBLE_VAULT_ID_MATCH}]
ini:
- {key: vault_id_match, section: defaults}
yaml: {key: defaults.vault_id_match}
DEFAULT_VAULT_IDENTITY:
name: Vault id label
default: default
description: 'The label to use for the default vault id label in cases where a vault id label is not provided'
env: [{name: ANSIBLE_VAULT_IDENTITY}]
ini:
- {key: vault_identity, section: defaults}
yaml: {key: defaults.vault_identity}
DEFAULT_VAULT_ENCRYPT_IDENTITY:
name: Vault id to use for encryption
description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.'
env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}]
ini:
- {key: vault_encrypt_identity, section: defaults}
yaml: {key: defaults.vault_encrypt_identity}
DEFAULT_VAULT_IDENTITY_LIST:
name: Default vault ids
default: []
description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.'
env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}]
ini:
- {key: vault_identity_list, section: defaults}
type: list
yaml: {key: defaults.vault_identity_list}
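  # Illustrative example: the ini equivalent of passing
  # `--vault-id dev@dev-password-file --vault-id prod@prompt` on every run:
  #   [defaults]
  #   vault_identity_list = dev@dev-password-file, prod@prompt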
DEFAULT_VAULT_PASSWORD_FILE:
name: Vault password file
default: ~
description:
- 'The vault password file to use. Equivalent to --vault-password-file or --vault-id'
- If executable, it will be run and the resulting stdout will be used as the password.
env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}]
ini:
- {key: vault_password_file, section: defaults}
type: path
yaml: {key: defaults.vault_password_file}
DEFAULT_VERBOSITY:
name: Verbosity
default: 0
description: Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
env: [{name: ANSIBLE_VERBOSITY}]
ini:
- {key: verbosity, section: defaults}
type: integer
DEPRECATION_WARNINGS:
name: Deprecation messages
default: True
description: "Toggle to control the showing of deprecation warnings"
env: [{name: ANSIBLE_DEPRECATION_WARNINGS}]
ini:
- {key: deprecation_warnings, section: defaults}
type: boolean
DEVEL_WARNING:
name: Running devel warning
default: True
description: Toggle to control showing warnings related to running devel
env: [{name: ANSIBLE_DEVEL_WARNING}]
ini:
- {key: devel_warning, section: defaults}
type: boolean
DIFF_ALWAYS:
name: Show differences
default: False
description: Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
env: [{name: ANSIBLE_DIFF_ALWAYS}]
ini:
- {key: always, section: diff}
type: bool
DIFF_CONTEXT:
name: Difference context
default: 3
description: How many lines of context to show when displaying the differences between files.
env: [{name: ANSIBLE_DIFF_CONTEXT}]
ini:
- {key: context, section: diff}
type: integer
DISPLAY_ARGS_TO_STDOUT:
name: Show task arguments
default: False
description:
- "Normally ``ansible-playbook`` will print a header for each task that is run.
These headers will contain the name: field from the task if you specified one.
If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running.
Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action.
If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header."
- "This setting defaults to False because there is a chance that you have sensitive values in your parameters and
you do not want those to be printed."
- "If you set this to True you should be sure that you have secured your environment's stdout
(no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or
made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values
See How do I keep secret data in my playbook? for more information."
env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}]
ini:
- {key: display_args_to_stdout, section: defaults}
type: boolean
version_added: "2.1"
DISPLAY_SKIPPED_HOSTS:
name: Show skipped results
default: True
description: "Toggle to control displaying skipped task/host entries in a task in the default callback"
env:
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- {key: display_skipped_hosts, section: defaults}
type: boolean
DOCSITE_ROOT_URL:
name: Root docsite URL
default: https://docs.ansible.com/ansible-core/
description: Root docsite URL used to generate docs URLs in warning/error text;
must be an absolute URL with valid scheme and trailing slash.
ini:
- {key: docsite_root_url, section: defaults}
version_added: "2.8"
DUPLICATE_YAML_DICT_KEY:
name: Controls ansible behaviour when finding duplicate keys in YAML.
default: warn
description:
- By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_DUPLICATE_YAML_DICT_KEY}]
ini:
- {key: duplicate_dict_key, section: defaults}
type: string
choices: &basic_error2
error: issue a 'fatal' error and stop the play
warn: issue a warning but continue
ignore: just continue silently
version_added: "2.9"
ERROR_ON_MISSING_HANDLER:
name: Missing handler error
default: True
description: "Toggle to allow missing handlers to become a warning instead of an error when notifying."
env: [{name: ANSIBLE_ERROR_ON_MISSING_HANDLER}]
ini:
- {key: error_on_missing_handler, section: defaults}
type: boolean
CONNECTION_FACTS_MODULES:
name: Map of connections to fact modules
default:
# use ansible.legacy names on unqualified facts modules to allow library/ overrides
asa: ansible.legacy.asa_facts
cisco.asa.asa: cisco.asa.asa_facts
eos: ansible.legacy.eos_facts
arista.eos.eos: arista.eos.eos_facts
frr: ansible.legacy.frr_facts
frr.frr.frr: frr.frr.frr_facts
ios: ansible.legacy.ios_facts
cisco.ios.ios: cisco.ios.ios_facts
iosxr: ansible.legacy.iosxr_facts
cisco.iosxr.iosxr: cisco.iosxr.iosxr_facts
junos: ansible.legacy.junos_facts
junipernetworks.junos.junos: junipernetworks.junos.junos_facts
nxos: ansible.legacy.nxos_facts
cisco.nxos.nxos: cisco.nxos.nxos_facts
vyos: ansible.legacy.vyos_facts
vyos.vyos.vyos: vyos.vyos.vyos_facts
exos: ansible.legacy.exos_facts
extreme.exos.exos: extreme.exos.exos_facts
slxos: ansible.legacy.slxos_facts
extreme.slxos.slxos: extreme.slxos.slxos_facts
voss: ansible.legacy.voss_facts
extreme.voss.voss: extreme.voss.voss_facts
ironware: ansible.legacy.ironware_facts
community.network.ironware: community.network.ironware_facts
description: "Which modules to run during a play's fact gathering stage based on connection"
type: dict
FACTS_MODULES:
name: Gather Facts Modules
default:
- smart
description:
- "Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type."
- "If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup'
or corresponding network module to the list (if you add 'smart', Ansible will also figure it out)."
- "This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit)."
env: [{name: ANSIBLE_FACTS_MODULES}]
ini:
- {key: facts_modules, section: defaults}
type: list
vars:
- name: ansible_facts_modules
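  # Illustrative example (the second module name is hypothetical): keep the
  # default fact gathering but add a custom facts module:
  #   [defaults]
  #   facts_modules = smart, my_namespace.my_collection.my_facts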
GALAXY_IGNORE_CERTS:
name: Galaxy validate certs
description:
- If set to yes, ansible-galaxy will not validate TLS certificates.
This can be useful for testing against a server with a self-signed certificate.
env: [{name: ANSIBLE_GALAXY_IGNORE}]
ini:
- {key: ignore_certs, section: galaxy}
type: boolean
GALAXY_ROLE_SKELETON:
name: Galaxy role skeleton directory
description: Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON}]
ini:
- {key: role_skeleton, section: galaxy}
type: path
GALAXY_ROLE_SKELETON_IGNORE:
name: Galaxy role skeleton ignore
default: ["^.git$", "^.*/.git_keep$"]
description: patterns of files to ignore inside a Galaxy role or collection skeleton directory
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}]
ini:
- {key: role_skeleton_ignore, section: galaxy}
type: list
GALAXY_COLLECTION_SKELETON:
name: Galaxy collection skeleton directory
description: Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON}]
ini:
- {key: collection_skeleton, section: galaxy}
type: path
GALAXY_COLLECTION_SKELETON_IGNORE:
name: Galaxy collection skeleton ignore
default: ["^.git$", "^.*/.git_keep$"]
description: patterns of files to ignore inside a Galaxy collection skeleton directory
env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON_IGNORE}]
ini:
- {key: collection_skeleton_ignore, section: galaxy}
type: list
# TODO: unused?
#GALAXY_SCMS:
# name: Galaxy SCMS
# default: git, hg
# description: Available galaxy source control management systems.
# env: [{name: ANSIBLE_GALAXY_SCMS}]
# ini:
# - {key: scms, section: galaxy}
# type: list
GALAXY_SERVER:
default: https://galaxy.ansible.com
description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source."
env: [{name: ANSIBLE_GALAXY_SERVER}]
ini:
- {key: server, section: galaxy}
yaml: {key: galaxy.server}
GALAXY_SERVER_LIST:
description:
- A list of Galaxy servers to use when installing a collection.
- The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
- 'See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.'
  - The order of servers in this list is used as the order in which a collection is resolved.
- Setting this config option will ignore the :ref:`galaxy_server` config option.
env: [{name: ANSIBLE_GALAXY_SERVER_LIST}]
ini:
- {key: server_list, section: galaxy}
type: list
version_added: "2.9"
GALAXY_TOKEN_PATH:
default: '{{ ANSIBLE_HOME ~ "/galaxy_token" }}'
description: "Local path to galaxy access token file"
env: [{name: ANSIBLE_GALAXY_TOKEN_PATH}]
ini:
- {key: token_path, section: galaxy}
type: path
version_added: "2.9"
GALAXY_DISPLAY_PROGRESS:
default: ~
description:
  - Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when
    outputting the stdout to a file.
- This config option controls whether the display wheel is shown or not.
- The default is to show the display wheel if stdout has a tty.
env: [{name: ANSIBLE_GALAXY_DISPLAY_PROGRESS}]
ini:
- {key: display_progress, section: galaxy}
type: bool
version_added: "2.10"
GALAXY_CACHE_DIR:
default: '{{ ANSIBLE_HOME ~ "/galaxy_cache" }}'
description:
- The directory that stores cached responses from a Galaxy server.
- This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
- Cache files inside this dir will be ignored if they are world writable.
env:
- name: ANSIBLE_GALAXY_CACHE_DIR
ini:
- section: galaxy
key: cache_dir
type: path
version_added: '2.11'
GALAXY_DISABLE_GPG_VERIFY:
default: false
type: bool
env:
- name: ANSIBLE_GALAXY_DISABLE_GPG_VERIFY
ini:
- section: galaxy
key: disable_gpg_verify
description:
- Disable GPG signature verification during collection installation.
version_added: '2.13'
GALAXY_GPG_KEYRING:
type: path
env:
- name: ANSIBLE_GALAXY_GPG_KEYRING
ini:
- section: galaxy
key: gpg_keyring
description:
- Configure the keyring used for GPG signature verification during collection installation and verification.
version_added: '2.13'
GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES:
type: list
env:
- name: ANSIBLE_GALAXY_IGNORE_SIGNATURE_STATUS_CODES
ini:
- section: galaxy
key: ignore_signature_status_codes
description:
  - A list of GPG status codes to ignore during GPG signature verification.
See L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes) for status code descriptions.
- If fewer signatures successfully verify the collection than `GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`,
signature verification will fail even if all error codes are ignored.
choices:
- EXPSIG
- EXPKEYSIG
- REVKEYSIG
- BADSIG
- ERRSIG
- NO_PUBKEY
- MISSING_PASSPHRASE
- BAD_PASSPHRASE
- NODATA
- UNEXPECTED
- ERROR
- FAILURE
- BADARMOR
- KEYEXPIRED
- KEYREVOKED
- NO_SECKEY
GALAXY_REQUIRED_VALID_SIGNATURE_COUNT:
type: str
default: 1
env:
- name: ANSIBLE_GALAXY_REQUIRED_VALID_SIGNATURE_COUNT
ini:
- section: galaxy
key: required_valid_signature_count
description:
- The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
  - This should be a positive integer, or ``all`` to indicate that all signatures must successfully validate the collection.
  - Prepend the value with ``+`` to fail if no valid signatures are found for the collection.
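  # Illustrative values: `1` (the default, at least one signature must verify),
  # `all` (every provided signature must verify), and `+1`/`+all` (as above, but
  # also fail when no valid signatures are found at all).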
HOST_KEY_CHECKING:
# note: constant not in use by ssh plugin anymore
# TODO: check non ssh connection plugins for use/migration
name: Check host keys
default: True
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
env: [{name: ANSIBLE_HOST_KEY_CHECKING}]
ini:
- {key: host_key_checking, section: defaults}
type: boolean
HOST_PATTERN_MISMATCH:
name: Control host pattern mismatch behaviour
default: 'warning'
  description: This setting changes the behaviour of mismatched host patterns; it allows you to force a fatal error, a warning, or to ignore it
env: [{name: ANSIBLE_HOST_PATTERN_MISMATCH}]
ini:
- {key: host_pattern_mismatch, section: inventory}
choices:
<<: *basic_error
version_added: "2.8"
INTERPRETER_PYTHON:
name: Python interpreter path (or automatic discovery behavior) used for module execution
default: auto
env: [{name: ANSIBLE_PYTHON_INTERPRETER}]
ini:
- {key: interpreter_python, section: defaults}
vars:
- {name: ansible_python_interpreter}
version_added: "2.8"
description:
- Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode.
Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``.
All discovery modes employ a lookup table to use the included system Python (on distributions known to include one),
falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not
available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters
installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or
``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility
with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
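  # Illustrative example (hypothetical host and path): pin the interpreter for a
  # single host via the inventory variable instead of relying on discovery:
  #   myhost ansible_python_interpreter=/usr/bin/python3.10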
_INTERPRETER_PYTHON_DISTRO_MAP:
name: Mapping of known included platform pythons for various Linux distros
default:
redhat:
'6': /usr/bin/python
'8': /usr/libexec/platform-python
'9': /usr/bin/python3
debian:
'8': /usr/bin/python
'10': /usr/bin/python3
fedora:
'23': /usr/bin/python3
ubuntu:
'14': /usr/bin/python
'16': /usr/bin/python3
version_added: "2.8"
# FUTURE: add inventory override once we're sure it can't be abused by a rogue target
# FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
- python3.11
- python3.10
- python3.9
- python3.8
- python3.7
- python3.6
- python3.5
- /usr/bin/python3
- /usr/libexec/platform-python
- python2.7
- /usr/bin/python
- python
vars:
- name: ansible_interpreter_python_fallback
type: list
version_added: "2.8"
TRANSFORM_INVALID_GROUP_CHARS:
name: Transform invalid characters in group names
default: 'never'
description:
- Make ansible transform invalid characters in group names supplied by inventory sources.
env: [{name: ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS}]
ini:
- {key: force_valid_group_names, section: defaults}
type: string
choices:
always: it will replace any invalid characters with '_' (underscore) and warn the user
never: it will allow for the group name but warn about the issue
ignore: it does the same as 'never', without issuing a warning
silently: it does the same as 'always', without issuing a warning
version_added: '2.8'
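  # Illustrative example: under 'always' (or 'silently') an inventory group
  # named `web-servers` is transformed to `web_servers`; under 'never' (or
  # 'ignore') the name is left as-is.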
INVALID_TASK_ATTRIBUTE_FAILED:
name: Controls whether invalid attributes for a task result in errors instead of warnings
default: True
description: If 'false', invalid attributes for a task will result in warnings instead of errors
type: boolean
env:
- name: ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED
ini:
- key: invalid_task_attribute_failed
section: defaults
version_added: "2.7"
INVENTORY_ANY_UNPARSED_IS_FAILED:
name: Controls whether any unparseable inventory source is a fatal error
default: False
description: >
If 'true', it is a fatal error when any given inventory source
cannot be successfully parsed by any available inventory plugin;
otherwise, this situation only attracts a warning.
type: boolean
env: [{name: ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED}]
ini:
- {key: any_unparsed_is_failed, section: inventory}
version_added: "2.7"
INVENTORY_CACHE_ENABLED:
name: Inventory caching enabled
default: False
description:
- Toggle to turn on inventory caching.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE}]
ini:
- {key: cache, section: inventory}
type: bool
INVENTORY_CACHE_PLUGIN:
name: Inventory cache plugin
description:
- The plugin for caching inventory.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN}]
ini:
- {key: cache_plugin, section: inventory}
INVENTORY_CACHE_PLUGIN_CONNECTION:
name: Inventory cache plugin URI to override the defaults section
description:
- The inventory cache connection.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_CONNECTION}]
ini:
- {key: cache_connection, section: inventory}
INVENTORY_CACHE_PLUGIN_PREFIX:
name: Inventory cache plugin table prefix
description:
- The table prefix for the cache plugin.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX}]
default: ansible_inventory_
ini:
- {key: cache_prefix, section: inventory}
INVENTORY_CACHE_TIMEOUT:
name: Inventory cache plugin expiration timeout
description:
- Expiration timeout for the inventory cache plugin data.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
default: 3600
env: [{name: ANSIBLE_INVENTORY_CACHE_TIMEOUT}]
ini:
- {key: cache_timeout, section: inventory}
INVENTORY_ENABLED:
name: Active Inventory plugins
default: ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml']
description: List of enabled inventory plugins, it also determines the order in which they are used.
env: [{name: ANSIBLE_INVENTORY_ENABLED}]
ini:
- {key: enable_plugins, section: inventory}
type: list
INVENTORY_EXPORT:
name: Set ansible-inventory into export mode
default: False
  description: Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it is optimized for exporting.
env: [{name: ANSIBLE_INVENTORY_EXPORT}]
ini:
- {key: export, section: inventory}
type: bool
INVENTORY_IGNORE_EXTS:
name: Inventory ignore extensions
default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
description: List of extensions to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE}]
ini:
- {key: inventory_ignore_extensions, section: defaults}
- {key: ignore_extensions, section: inventory}
type: list
INVENTORY_IGNORE_PATTERNS:
name: Inventory ignore patterns
default: []
description: List of patterns to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}]
ini:
- {key: inventory_ignore_patterns, section: defaults}
- {key: ignore_patterns, section: inventory}
type: list
INVENTORY_UNPARSED_IS_FAILED:
name: Unparsed Inventory failure
default: False
description: >
If 'true' it is a fatal error if every single potential inventory
source fails to parse, otherwise this situation will only attract a
warning.
env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}]
ini:
- {key: unparsed_is_failed, section: inventory}
type: bool
JINJA2_NATIVE_WARNING:
name: Running older than required Jinja version for jinja2_native warning
default: True
description: Toggle to control showing warnings related to running a Jinja version
older than required for jinja2_native
env:
- name: ANSIBLE_JINJA2_NATIVE_WARNING
deprecated:
why: This option is no longer used in the Ansible Core code base.
version: "2.17"
ini:
- {key: jinja2_native_warning, section: defaults}
type: boolean
MAX_FILE_SIZE_FOR_DIFF:
name: Diff maximum file size
default: 104448
description: Maximum size of files to be considered for diff display
env: [{name: ANSIBLE_MAX_DIFF_SIZE}]
ini:
- {key: max_diff_size, section: defaults}
type: int
NETWORK_GROUP_MODULES:
name: Network module families
default: [eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos]
description: 'TODO: write it'
env:
- name: ANSIBLE_NETWORK_GROUP_MODULES
ini:
- {key: network_group_modules, section: defaults}
type: list
yaml: {key: defaults.network_group_modules}
INJECT_FACTS_AS_VARS:
default: True
description:
- Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
- Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
env: [{name: ANSIBLE_INJECT_FACT_VARS}]
ini:
- {key: inject_facts_as_vars, section: defaults}
type: boolean
version_added: "2.5"
MODULE_IGNORE_EXTS:
name: Module ignore extensions
default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}"
description:
- List of extensions to ignore when looking for modules to load
- This is for rejecting script and binary module fallback extensions
env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}]
ini:
- {key: module_ignore_exts, section: defaults}
type: list
OLD_PLUGIN_CACHE_CLEARING:
  description: Previously Ansible would only clear some of the plugin loading caches when loading new roles; this led to behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows returning to that behaviour.
env: [{name: ANSIBLE_OLD_PLUGIN_CACHE_CLEAR}]
ini:
- {key: old_plugin_cache_clear, section: defaults}
type: boolean
default: False
version_added: "2.8"
PARAMIKO_HOST_KEY_AUTO_ADD:
# TODO: move to plugin
default: False
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
ini:
- {key: host_key_auto_add, section: paramiko_connection}
type: boolean
PARAMIKO_LOOK_FOR_KEYS:
name: look for keys
default: True
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
ini:
- {key: look_for_keys, section: paramiko_connection}
type: boolean
PERSISTENT_CONTROL_PATH_DIR:
name: Persistence socket path
default: '{{ ANSIBLE_HOME ~ "/pc" }}'
description: Path to socket to be used by the connection persistence system.
env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}]
ini:
- {key: control_path_dir, section: persistent_connection}
type: path
PERSISTENT_CONNECT_TIMEOUT:
name: Persistence timeout
default: 30
description: This controls how long the persistent connection will remain idle before it is destroyed.
env: [{name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT}]
ini:
- {key: connect_timeout, section: persistent_connection}
type: integer
PERSISTENT_CONNECT_RETRY_TIMEOUT:
name: Persistence connection retry timeout
default: 15
description: This controls the retry timeout for persistent connection to connect to the local domain socket.
env: [{name: ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT}]
ini:
- {key: connect_retry_timeout, section: persistent_connection}
type: integer
PERSISTENT_COMMAND_TIMEOUT:
name: Persistence command timeout
default: 30
  description: This controls the amount of time to wait for a response from the remote device before timing out the persistent connection.
env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}]
ini:
- {key: command_timeout, section: persistent_connection}
type: int
PLAYBOOK_DIR:
name: playbook dir override for non-playbook CLIs (ala --playbook-dir)
version_added: "2.9"
description:
- A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
env: [{name: ANSIBLE_PLAYBOOK_DIR}]
ini: [{key: playbook_dir, section: defaults}]
type: path
PLAYBOOK_VARS_ROOT:
name: playbook vars files root
default: top
version_added: "2.4.1"
description:
- This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}]
ini:
- {key: playbook_vars_root, section: defaults}
choices:
top: follows the traditional behavior of using the top playbook in the chain to find the root directory.
bottom: follows the 2.4.0 behavior of using the current playbook to find the root directory.
all: examines from the first parent to the current playbook.
PLUGIN_FILTERS_CFG:
name: Config file for limiting valid plugins
default: null
version_added: "2.5.0"
description:
- "A path to configuration for filtering which plugins installed on the system are allowed to be used."
- "See :ref:`plugin_filtering_config` for details of the filter file's format."
- " The default is /etc/ansible/plugin_filters.yml"
ini:
- key: plugin_filters_cfg
section: defaults
type: path
PYTHON_MODULE_RLIMIT_NOFILE:
name: Adjust maximum file descriptor soft limit during Python module execution
description:
- Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on
Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default
value of 0 does not attempt to adjust existing system-defined limits.
default: 0
env:
- {name: ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE}
ini:
- {key: python_module_rlimit_nofile, section: defaults}
vars:
- {name: ansible_python_module_rlimit_nofile}
version_added: '2.8'
RETRY_FILES_ENABLED:
name: Retry files
default: False
description: This controls whether a failed Ansible playbook should create a .retry file.
env: [{name: ANSIBLE_RETRY_FILES_ENABLED}]
ini:
- {key: retry_files_enabled, section: defaults}
type: bool
RETRY_FILES_SAVE_PATH:
name: Retry files path
default: ~
description:
- This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
- This file will be overwritten after each run with the list of failed hosts from all plays.
env: [{name: ANSIBLE_RETRY_FILES_SAVE_PATH}]
ini:
- {key: retry_files_save_path, section: defaults}
type: path
RUN_VARS_PLUGINS:
name: When should vars plugins run relative to inventory
default: demand
description:
  - This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection.
env: [{name: ANSIBLE_RUN_VARS_PLUGINS}]
ini:
- {key: run_vars_plugins, section: defaults}
type: str
choices:
demand: will run vars_plugins relative to inventory sources anytime vars are 'demanded' by tasks.
start: will run vars_plugins relative to inventory sources after importing that inventory source.
version_added: "2.10"
SHOW_CUSTOM_STATS:
name: Display custom stats
default: False
description: 'This adds the custom stats set via the set_stats plugin to the default output'
env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}]
ini:
- {key: show_custom_stats, section: defaults}
type: bool
STRING_TYPE_FILTERS:
name: Filters to preserve strings
default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json]
description:
- "This list of filters avoids 'type conversion' when templating variables"
- Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
env: [{name: ANSIBLE_STRING_TYPE_FILTERS}]
ini:
- {key: dont_type_filters, section: jinja2}
type: list
SYSTEM_WARNINGS:
name: System warnings
default: True
description:
- Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
- These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
env: [{name: ANSIBLE_SYSTEM_WARNINGS}]
ini:
- {key: system_warnings, section: defaults}
type: boolean
TAGS_RUN:
name: Run Tags
default: []
type: list
  description: default list of tags to run in your plays; Skip Tags has precedence.
env: [{name: ANSIBLE_RUN_TAGS}]
ini:
- {key: run, section: tags}
version_added: "2.5"
TAGS_SKIP:
name: Skip Tags
default: []
type: list
  description: default list of tags to skip in your plays; has precedence over Run Tags
env: [{name: ANSIBLE_SKIP_TAGS}]
ini:
- {key: skip, section: tags}
version_added: "2.5"
TASK_TIMEOUT:
name: Task Timeout
default: 0
description:
- Set the maximum time (in seconds) that a task can run for.
- If set to 0 (the default) there is no timeout.
env: [{name: ANSIBLE_TASK_TIMEOUT}]
ini:
- {key: task_timeout, section: defaults}
type: integer
version_added: '2.10'
WORKER_SHUTDOWN_POLL_COUNT:
name: Worker Shutdown Poll Count
default: 0
description:
- The maximum number of times to check Task Queue Manager worker processes to verify they have exited cleanly.
- After this limit is reached any worker processes still running will be terminated.
- This is for internal use only.
env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT}]
type: integer
version_added: '2.10'
WORKER_SHUTDOWN_POLL_DELAY:
name: Worker Shutdown Poll Delay
default: 0.1
description:
- The number of seconds to sleep between polling loops when checking Task Queue Manager worker processes to verify they have exited cleanly.
- This is for internal use only.
env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY}]
type: float
version_added: '2.10'
USE_PERSISTENT_CONNECTIONS:
name: Persistence
default: False
description: Toggles the use of persistence for connections.
env: [{name: ANSIBLE_USE_PERSISTENT_CONNECTIONS}]
ini:
- {key: use_persistent_connections, section: defaults}
type: boolean
VARIABLE_PLUGINS_ENABLED:
name: Vars plugin enabled list
default: ['host_group_vars']
description: Accept list for variable plugins that require it.
env: [{name: ANSIBLE_VARS_ENABLED}]
ini:
- {key: vars_plugins_enabled, section: defaults}
type: list
version_added: "2.10"
VARIABLE_PRECEDENCE:
name: Group variable precedence
default: ['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play', 'groups_plugins_inventory', 'groups_plugins_play']
  description: Allows changing the group variable precedence merge order.
env: [{name: ANSIBLE_PRECEDENCE}]
ini:
- {key: precedence, section: defaults}
type: list
version_added: "2.4"
WIN_ASYNC_STARTUP_TIMEOUT:
name: Windows Async Startup Timeout
default: 5
description:
- For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used
on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
- This is not the total time an async command can run for, but is a separate timeout to wait for an async command to
start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the
overall maximum duration the task can take will be extended by the amount specified here.
env: [{name: ANSIBLE_WIN_ASYNC_STARTUP_TIMEOUT}]
ini:
- {key: win_async_startup_timeout, section: defaults}
type: integer
vars:
- {name: ansible_win_async_startup_timeout}
version_added: '2.10'
YAML_FILENAME_EXTENSIONS:
name: Valid YAML extensions
default: [".yml", ".yaml", ".json"]
description:
- "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
- 'This affects vars_files, include_vars, inventory and vars plugins among others.'
env:
- name: ANSIBLE_YAML_FILENAME_EXT
ini:
- section: defaults
key: yaml_valid_extensions
type: list
NETCONF_SSH_CONFIG:
  description: This variable is used to enable a bastion/jump host with a netconf connection. If set to True, the
    bastion/jump host ssh settings should be present in the ~/.ssh/config file; alternatively, it can be set to a
    custom ssh configuration file path from which to read the bastion/jump host settings.
env: [{name: ANSIBLE_NETCONF_SSH_CONFIG}]
ini:
- {key: ssh_config, section: netconf_connection}
yaml: {key: netconf_connection.ssh_config}
default: null
STRING_CONVERSION_ACTION:
version_added: '2.8'
description:
- Action to take when a module parameter value is converted to a string (this does not affect variables).
For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc.
will be converted by the YAML parser unless fully quoted.
- Valid options are 'error', 'warn', and 'ignore'.
- Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
default: 'warn'
env:
- name: ANSIBLE_STRING_CONVERSION_ACTION
ini:
- section: defaults
key: string_conversion_action
type: string
VALIDATE_ACTION_GROUP_METADATA:
version_added: '2.12'
description:
- A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group.
Metadata containing unexpected fields or value types will produce a warning when this is True.
default: True
env: [{name: ANSIBLE_VALIDATE_ACTION_GROUP_METADATA}]
ini:
- section: defaults
key: validate_action_group_metadata
type: bool
VERBOSE_TO_STDERR:
version_added: '2.8'
description:
- Force 'verbose' option to use stderr instead of stdout
default: False
env:
- name: ANSIBLE_VERBOSE_TO_STDERR
ini:
- section: defaults
key: verbose_to_stderr
type: bool
...
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,493 |
Unable to set `null_representation` config
|
### Summary
Trying to set the `null_representation` (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-null-representation) setting results in something like:
```text
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/base.yml#L952 specifies `type: none`
and https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/manager.py#L99 enforces this type, but this wasn't the case in earlier commits, as I understand it.
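For context, here is a minimal sketch (paraphrased, not the verbatim source) of the 'none' branch in `ensure_type` that produces the error above; the real function dispatches on many other types as well:
```python
# paraphrased from ansible/config/manager.py (v2.12.1), 'none' branch of ensure_type
def ensure_none(value):
    # only an actual None, or the literal string "None", passes;
    # any other string -- including "null" -- raises
    if value == "None":
        value = None
    if value is not None:
        raise ValueError('Invalid type provided for "None": "%s"' % value)
    return value
```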
Some history I could find:
* https://github.com/ansible/ansible/commit/892e230514090dc9221ee01d289c3532aa6ef260
* https://github.com/ansible/ansible/commit/9f6bbf8c2f02cdc70df149095a6623ee0c2c7ba7
* https://github.com/ansible/ansible/commit/74842adc07edb248f9b544389ce4093b9149f195
Also, could this evolve so that the config value could be overridden by local variables and/or by '#jinja2:' (by setting this value in the Jinja environment in this case), and not just globally (https://github.com/ansible/ansible/blob/v2.11.6/lib/ansible/template/__init__.py#L809)?
### Issue Type
Bug Report
### Component Name
config
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
python version = 3.8
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
CentOS 7
### Steps to Reproduce
* Create ansible.cfg with this content:
```ini
[defaults]
null_representation = null
```
* Run `ansible-config dump --only-changed`
### Expected Results
Something like:
```text
$ ansible-config dump --only-changed
DEFAULT_NULL_REPRESENTATION(/my/ansible.cfg) = null
```
### Actual Results
```console
Unhandled error:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/bin/ansible-config", line 63, in <module>
import ansible.constants as C
File "/my/venv/lib/python3.8/site-packages/ansible/constants.py", line 181, in <module>
config = ConfigManager()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 310, in __init__
self.update_config_data()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 617, in update_config_data
raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76493
|
https://github.com/ansible/ansible/pull/78451
|
3a59cb25f486eb6c633995f5fe01413c0ed42116
|
0de44804679461b8d898129068183d6da416e3a7
| 2021-12-07T18:08:25Z |
python
| 2022-08-11T20:07:57Z |
test/integration/targets/template/badnull1.cfg
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,493 |
Unable to set `null_representation` config
|
### Summary
Trying to set the `null_representation` (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-null-representation) setting results in something like:
```text
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/base.yml#L952 specifies `type: none`
and https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/manager.py#L99 enforces this type, but this wasn't the case in earlier commits, as I understand it.
Some history I could find:
* https://github.com/ansible/ansible/commit/892e230514090dc9221ee01d289c3532aa6ef260
* https://github.com/ansible/ansible/commit/9f6bbf8c2f02cdc70df149095a6623ee0c2c7ba7
* https://github.com/ansible/ansible/commit/74842adc07edb248f9b544389ce4093b9149f195
Also, could this evolve so that the config value could be overridden by local variables and/or by '#jinja2:' (by setting this value in the Jinja environment in this case), and not just globally (https://github.com/ansible/ansible/blob/v2.11.6/lib/ansible/template/__init__.py#L809)?
### Issue Type
Bug Report
### Component Name
config
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
python version = 3.8
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
CentOS 7
### Steps to Reproduce
* Create ansible.cfg with this content:
```ini
[defaults]
null_representation = null
```
* Run `ansible-config dump --only-changed`
### Expected Results
Something like:
```text
$ ansible-config dump --only-changed
DEFAULT_NULL_REPRESENTATION(/my/ansible.cfg) = null
```
### Actual Results
```console
Unhandled error:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/bin/ansible-config", line 63, in <module>
import ansible.constants as C
File "/my/venv/lib/python3.8/site-packages/ansible/constants.py", line 181, in <module>
config = ConfigManager()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 310, in __init__
self.update_config_data()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 617, in update_config_data
raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76493
|
https://github.com/ansible/ansible/pull/78451
|
3a59cb25f486eb6c633995f5fe01413c0ed42116
|
0de44804679461b8d898129068183d6da416e3a7
| 2021-12-07T18:08:25Z |
python
| 2022-08-11T20:07:57Z |
test/integration/targets/template/badnull2.cfg
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,493 |
Unable to set `null_representation` config
|
### Summary
Trying to set the `null_representation` (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-null-representation) setting results in something like:
```text
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/base.yml#L952 specifies `type: none`
and https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/manager.py#L99 enforces this type, but this wasn't the case in earlier commits, as I understand it.
Some history I could find:
* https://github.com/ansible/ansible/commit/892e230514090dc9221ee01d289c3532aa6ef260
* https://github.com/ansible/ansible/commit/9f6bbf8c2f02cdc70df149095a6623ee0c2c7ba7
* https://github.com/ansible/ansible/commit/74842adc07edb248f9b544389ce4093b9149f195
Also, could this evolve so that the config value could be overridden by local variables and/or by '#jinja2:' (by setting this value in the Jinja environment in this case), and not just globally (https://github.com/ansible/ansible/blob/v2.11.6/lib/ansible/template/__init__.py#L809)?
### Issue Type
Bug Report
### Component Name
config
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
python version = 3.8
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
CentOS 7
### Steps to Reproduce
* Create ansible.cfg with this content:
```ini
[defaults]
null_representation = null
```
* Run `ansible-config dump --only-changed`
### Expected Results
Something like:
```text
$ ansible-config dump --only-changed
DEFAULT_NULL_REPRESENTATION(/my/ansible.cfg) = null
```
### Actual Results
```console
Unhandled error:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/bin/ansible-config", line 63, in <module>
import ansible.constants as C
File "/my/venv/lib/python3.8/site-packages/ansible/constants.py", line 181, in <module>
config = ConfigManager()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 310, in __init__
self.update_config_data()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 617, in update_config_data
raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76493
|
https://github.com/ansible/ansible/pull/78451
|
3a59cb25f486eb6c633995f5fe01413c0ed42116
|
0de44804679461b8d898129068183d6da416e3a7
| 2021-12-07T18:08:25Z |
python
| 2022-08-11T20:07:57Z |
test/integration/targets/template/badnull3.cfg
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,493 |
Unable to set `null_representation` config
|
### Summary
Trying to set the `null_representation` (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-null-representation) setting results in something like:
```text
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/base.yml#L952 specifies `type: none`
and https://github.com/ansible/ansible/blob/v2.12.1/lib/ansible/config/manager.py#L99 enforces this type, but this wasn't the case in earlier commits, as I understand it.
Some history I could find:
* https://github.com/ansible/ansible/commit/892e230514090dc9221ee01d289c3532aa6ef260
* https://github.com/ansible/ansible/commit/9f6bbf8c2f02cdc70df149095a6623ee0c2c7ba7
* https://github.com/ansible/ansible/commit/74842adc07edb248f9b544389ce4093b9149f195
Also, could this evolve so that the config value could be overridden by local variables and/or by '#jinja2:' (by setting this value in the Jinja environment in this case), and not just globally (https://github.com/ansible/ansible/blob/v2.11.6/lib/ansible/template/__init__.py#L809)?
### Issue Type
Bug Report
### Component Name
config
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
python version = 3.8
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
CentOS 7
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
* Create ansible.cfg with this content:
```yaml (paste below)
[defaults]
null_representation = null
```
* Run `ansible-config dump --only-changed`
### Expected Results
Something like:
```text
$ ansible-config dump --only-changed
DEFAULT_NULL_REPRESENTATION(/my/ansible.cfg) = null
```
### Actual Results
```console
Unhandled error:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 543, in get_config_value_and_origin
value = ensure_type(value, defs[config].get('type'), origin=origin)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 162, in ensure_type
raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
ValueError: Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 605, in update_config_data
value, origin = self.get_config_value_and_origin(config, configfile)
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 550, in get_config_value_and_origin
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
ansible.errors.AnsibleOptionsError: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/my/venv/bin/ansible-config", line 63, in <module>
import ansible.constants as C
File "/my/venv/lib/python3.8/site-packages/ansible/constants.py", line 181, in <module>
config = ConfigManager()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 310, in __init__
self.update_config_data()
File "/my/venv/lib/python3.8/site-packages/ansible/config/manager.py", line 617, in update_config_data
raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
ansible.errors.AnsibleError: Invalid settings supplied for DEFAULT_NULL_REPRESENTATION: Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
. Invalid type for configuration option setting: DEFAULT_NULL_REPRESENTATION : Invalid type provided for "None": "null"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76493
|
https://github.com/ansible/ansible/pull/78451
|
3a59cb25f486eb6c633995f5fe01413c0ed42116
|
0de44804679461b8d898129068183d6da416e3a7
| 2021-12-07T18:08:25Z |
python
| 2022-08-11T20:07:57Z |
test/integration/targets/template/runme.sh
|
#!/usr/bin/env bash
set -eux
ANSIBLE_ROLES_PATH=../ ansible-playbook template.yml -i ../../inventory -v "$@"
# Test for https://github.com/ansible/ansible/pull/35571
ansible testhost -i testhost, -m debug -a 'msg={{ hostvars["localhost"] }}' -e "vars1={{ undef() }}" -e "vars2={{ vars1 }}"
# Test for https://github.com/ansible/ansible/issues/27262
ansible-playbook ansible_managed.yml -c ansible_managed.cfg -i ../../inventory -v "$@"
# Test for #42585
ANSIBLE_ROLES_PATH=../ ansible-playbook custom_template.yml -i ../../inventory -v "$@"
# Test for several corner cases #57188
ansible-playbook corner_cases.yml -v "$@"
# Test for #57351
ansible-playbook filter_plugins.yml -v "$@"
# https://github.com/ansible/ansible/issues/68699
ansible-playbook unused_vars_include.yml -v "$@"
# https://github.com/ansible/ansible/issues/55152
ansible-playbook undefined_var_info.yml -v "$@"
# https://github.com/ansible/ansible/issues/72615
ansible-playbook 72615.yml -v "$@"
# https://github.com/ansible/ansible/issues/6653
ansible-playbook 6653.yml -v "$@"
# https://github.com/ansible/ansible/issues/72262
ansible-playbook 72262.yml -v "$@"
# ensure unsafe is preserved, even with extra newlines
ansible-playbook unsafe.yml -v "$@"
# ensure Jinja2 overrides from a template are used
ansible-playbook in_template_overrides.yml -v "$@"
ansible-playbook lazy_eval.yml -i ../../inventory -v "$@"
ansible-playbook undefined_in_import.yml -i ../../inventory -v "$@"
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,485 |
Remove FAQ about not having Ansible as wheels
|
### Summary
Ansible as wheels has been supported since 2.13, so remove the FAQ entry at https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#why-don-t-you-ship-ansible-in-wheel-format-or-other-packaging-format
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/reference_appendices/faq.rst
### Ansible Version
```console
$ ansible --version
2.13
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
none
```
### OS / Environment
none
### Additional Information
none
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78485
|
https://github.com/ansible/ansible/pull/78524
|
135f95fb2fae05667e0b7382183693cda394aa9e
|
7f69629fa787fb5242534111cec2fb4772e3f1b9
| 2022-08-09T20:33:32Z |
python
| 2022-08-11T20:54:21Z |
docs/docsite/rst/reference_appendices/faq.rst
|
.. _ansible_faq:
Frequently Asked Questions
==========================
Here are some commonly asked questions and their answers.
.. _collections_transition:
Where did all the modules go?
+++++++++++++++++++++++++++++
In July, 2019, we announced that collections would be the `future of Ansible content delivery <https://www.ansible.com/blog/the-future-of-ansible-content-delivery>`_. A collection is a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. In Ansible 2.9 we added support for collections. In Ansible 2.10 we `extracted most modules from the main ansible/ansible repository <https://access.redhat.com/solutions/5295121>`_ and placed them in :ref:`collections <list_of_collections>`. Collections may be maintained by the Ansible team, by the Ansible community, or by Ansible partners. The `ansible/ansible repository <https://github.com/ansible/ansible>`_ now contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-core`` (it was briefly called ``ansible-base`` for version 2.10).
* To learn more about using collections, see :ref:`collections`.
* To learn more about developing collections, see :ref:`developing_collections`.
* To learn more about contributing to existing collections, see the individual collection repository for guidelines, or see :ref:`contributing_maintained_collections` to contribute to one of the Ansible-maintained collections.
.. _find_my_module:
Where did this specific module go?
++++++++++++++++++++++++++++++++++
If you are searching for a specific module, you can check the `runtime.yml <https://github.com/ansible/ansible/blob/devel/lib/ansible/config/ansible_builtin_runtime.yml>`_ file, which lists the first destination for each module that we extracted from the main ansible/ansible repository. Some modules have moved again since then. You can also search on `Ansible Galaxy <https://galaxy.ansible.com/>`_ or ask on one of our :ref:`chat channels <communication_irc>`.
.. _set_environment:
How can I set the PATH or any other environment variable for a task or entire play?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Setting environment variables can be done with the `environment` keyword. It can be used at the task or other levels in the play.
.. code-block:: yaml

    shell:
      cmd: date
    environment:
      LANG: fr_FR.UTF-8
.. code-block:: yaml

    - hosts: servers
      environment:
        PATH: "{{ ansible_env.PATH }}:/thingy/bin"
        SOME: value
.. note:: Starting in 2.0.1, the setup task from ``gather_facts`` also inherits the environment directive from the play. You might need to use the ``|default`` filter to avoid errors if setting this at play level.
.. _faq_setting_users_and_ports:
How do I handle different machines needing different user accounts or ports to log in with?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Setting inventory variables in the inventory file is the easiest way.
For instance, suppose these hosts have different usernames and ports:
.. code-block:: ini
[webservers]
asdf.example.com ansible_port=5000 ansible_user=alice
jkl.example.com ansible_port=5001 ansible_user=bob
You can also dictate the connection type to be used, if you want:
.. code-block:: ini
[testcluster]
localhost ansible_connection=local
/path/to/chroot1 ansible_connection=chroot
foo.example.com ansible_connection=paramiko
You may also wish to keep these in group variables instead, or file them in a group_vars/<groupname> file.
See the rest of the documentation for more information about how to organize variables.
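For example, the inventory settings above could live in a group variables file instead. A minimal sketch (the filename and values are illustrative, not prescriptive):

.. code-block:: yaml

    # group_vars/webservers.yml -- hypothetical file; adjust to your inventory
    ansible_port: 5000
    ansible_user: alice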
.. _use_ssh:
How do I get ansible to reuse connections, enable Kerberized SSH, or have Ansible pay attention to my local SSH config file?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Switch your default connection type in the configuration file to ``ssh``, or use ``-c ssh`` to use
Native OpenSSH for connections instead of the python paramiko library. In Ansible 1.2.1 and later, ``ssh`` will be used
by default if OpenSSH is new enough to support ControlPersist as an option.
Paramiko is great for starting out, but the OpenSSH type offers many advanced options. You will want to run Ansible
from a machine new enough to support ControlPersist, if you are using this connection type. You can still manage
older clients. If you are using RHEL 6, CentOS 6, SLES 10 or SLES 11 the version of OpenSSH is still a bit old, so
consider managing from a Fedora or openSUSE client even though you are managing older nodes, or just use paramiko.
We keep paramiko as the default because, if you are first installing Ansible on these enterprise operating systems, it offers a better experience for new users.
.. _use_ssh_jump_hosts:
How do I configure a jump host to access servers that I have no direct access to?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You can set a ``ProxyCommand`` in the
``ansible_ssh_common_args`` inventory variable. Any arguments specified in
this variable are added to the sftp/scp/ssh command line when connecting
to the relevant host(s). Consider the following inventory group:
.. code-block:: ini
[gatewayed]
foo ansible_host=192.0.2.1
bar ansible_host=192.0.2.2
You can create `group_vars/gatewayed.yml` with the following contents::
ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q [email protected]"'
Ansible will append these arguments to the command line when trying to
connect to any hosts in the group ``gatewayed``. (These arguments are used
in addition to any ``ssh_args`` from ``ansible.cfg``, so you do not need to
repeat global ``ControlPersist`` settings in ``ansible_ssh_common_args``.)
Note that ``ssh -W`` is available only with OpenSSH 5.4 or later. With
older versions, it's necessary to execute ``nc %h:%p`` or some equivalent
command on the bastion host.
With earlier versions of Ansible, it was necessary to configure a
suitable ``ProxyCommand`` for one or more hosts in ``~/.ssh/config``,
or globally by setting ``ssh_args`` in ``ansible.cfg``.
.. _ssh_serveraliveinterval:
How do I get Ansible to notice a dead target in a timely manner?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You can add ``-o ServerAliveInterval=NumberOfSeconds`` in ``ssh_args`` from ``ansible.cfg``. Without this option,
SSH and therefore Ansible will wait until the TCP connection times out. Another solution is to add ``ServerAliveInterval``
into your global SSH configuration. A good value for ``ServerAliveInterval`` is up to you to decide; keep in mind that
``ServerAliveCountMax=3`` is the SSH default so any value you set will be tripled before terminating the SSH session.
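If you prefer to keep this out of the global SSH configuration, the same effect can be achieved per-group through inventory variables. A minimal sketch (the group file and interval are illustrative):

.. code-block:: yaml

    # group_vars/all.yml -- hypothetical file
    # Send an SSH keepalive every 30 seconds; with the default
    # ServerAliveCountMax=3, a dead target is noticed after roughly 90 seconds.
    ansible_ssh_common_args: '-o ServerAliveInterval=30'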
.. _cloud_provider_performance:
How do I speed up Ansible runs for servers from cloud providers (EC2, OpenStack, and so on)?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Don't try to manage a fleet of machines of a cloud provider from your laptop.
Instead, connect to a management node inside that cloud provider first and run Ansible from there.
.. _python_interpreters:
How do I handle not having a Python interpreter at /usr/bin/python on a remote machine?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
While you can write Ansible modules in any language, most Ansible modules are written in Python,
including the ones central to letting Ansible work.
By default, Ansible assumes it can find a :command:`/usr/bin/python` on your remote system that is
either Python 2 (version 2.6 or higher) or Python 3 (version 3.5 or higher).
Setting the inventory variable ``ansible_python_interpreter`` on any host will tell Ansible to
auto-replace the Python interpreter with that value instead. Thus, you can point to any Python you
want on the system if :command:`/usr/bin/python` on your system does not point to a compatible
Python interpreter.
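For instance, a minimal YAML inventory sketch (the hostname and interpreter path are hypothetical):

.. code-block:: yaml

    all:
      hosts:
        legacy.example.com:
          # point Ansible at a compatible interpreter on this host
          ansible_python_interpreter: /usr/local/bin/python3.8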
Some platforms may only have Python 3 installed by default. If it is not installed as
:command:`/usr/bin/python`, you will need to configure the path to the interpreter via
``ansible_python_interpreter``. Although most core modules will work with Python 3, there may be some
special purpose ones which do not or you may encounter a bug in an edge case. As a temporary
workaround you can install Python 2 on the managed host and configure Ansible to use that Python via
``ansible_python_interpreter``. If there's no mention in the module's documentation that the module
requires Python 2, you can also report a bug on our `bug tracker
<https://github.com/ansible/ansible/issues>`_ so that the incompatibility can be fixed in a future release.
Do not replace the shebang lines of your python modules. Ansible will do this for you automatically at deploy time.
Also, this works for ANY interpreter, for example ruby: ``ansible_ruby_interpreter``, perl: ``ansible_perl_interpreter``, and so on,
so you can use this for custom modules written in any scripting language and control the interpreter location.
Keep in mind that if you put ``env`` in your module shebang line (``#!/usr/bin/env <other>``),
this facility will be ignored so you will be at the mercy of the remote `$PATH`.
.. _installation_faqs:
How do I handle the package dependencies required by Ansible's dependencies during installation?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
While installing Ansible, sometimes you may encounter errors such as `No package 'libffi' found` or `fatal error: Python.h: No such file or directory`.
These errors are generally caused by missing packages that are dependencies of the packages required by Ansible.
For example, the `libffi` package is a dependency of `pynacl` and `paramiko` (Ansible -> paramiko -> pynacl -> libffi).
In order to solve these kinds of dependency issues, you might need to install required packages using
the OS native package managers, such as `yum`, `dnf`, or `apt`, or as mentioned in the package installation guide.
Refer to the documentation of the respective package for such dependencies and their installation methods.
Common Platform Issues
++++++++++++++++++++++
What customer platforms does Red Hat support?
---------------------------------------------
A number of them! For a definitive list please see this `Knowledge Base article <https://access.redhat.com/articles/3168091>`_.
Running in a virtualenv
-----------------------
You can install Ansible into a virtualenv on the controller quite simply:
.. code-block:: shell
$ virtualenv ansible
$ source ./ansible/bin/activate
$ pip install ansible
If you want to run under Python 3 instead of Python 2 you may want to change that slightly:
.. code-block:: shell
$ virtualenv -p python3 ansible
$ source ./ansible/bin/activate
$ pip install ansible
If you need to use any libraries which are not available via pip (for instance, SELinux Python
bindings on systems such as Red Hat Enterprise Linux or Fedora that have SELinux enabled), then you
need to install them into the virtualenv. There are two methods:
* When you create the virtualenv, specify ``--system-site-packages`` to make use of any libraries
installed in the system's Python:
.. code-block:: shell
$ virtualenv ansible --system-site-packages
* Copy those files in manually from the system. For instance, for SELinux bindings you might do:
.. code-block:: shell
$ virtualenv ansible --system-site-packages
$ cp -r -v /usr/lib64/python3.*/site-packages/selinux/ ./py3-ansible/lib64/python3.*/site-packages/
$ cp -v /usr/lib64/python3.*/site-packages/*selinux*.so ./py3-ansible/lib64/python3.*/site-packages/
Running on macOS
----------------
When executing Ansible on a system with macOS as a controller machine, one might encounter the following error:
.. error::
+[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug.
ERROR! A worker was found in a dead state
In general the recommended workaround is to set the following environment variable in your shell:
.. code-block:: shell
$ export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
Running on BSD
--------------
.. seealso:: :ref:`working_with_bsd`
Running on Solaris
------------------
By default, Solaris 10 and earlier run a non-POSIX shell which does not correctly expand the default
tmp directory Ansible uses ( :file:`~/.ansible/tmp`). If you see module failures on Solaris machines, this
is likely the problem. There are several workarounds:
* You can set ``remote_tmp`` to a path that will expand correctly with the shell you are using
(see the plugin documentation for :ref:`C shell<csh_shell>`, :ref:`fish shell<fish_shell>`,
and :ref:`Powershell<powershell_shell>`). For example, in the ansible config file you can set::
remote_tmp=$HOME/.ansible/tmp
In Ansible 2.5 and later, you can also set it per-host in inventory like this::
solaris1 ansible_remote_tmp=$HOME/.ansible/tmp
* You can set :ref:`ansible_shell_executable<ansible_shell_executable>` to the path to a POSIX compatible shell. For
instance, many Solaris hosts have a POSIX shell located at :file:`/usr/xpg4/bin/sh` so you can set
this in inventory like so::
solaris1 ansible_shell_executable=/usr/xpg4/bin/sh
(bash, ksh, and zsh should also be POSIX compatible if you have any of those installed).
Running on z/OS
---------------
There are a few common errors that one might run into when trying to execute Ansible on z/OS as a target.
* Version 2.7.6 of python for z/OS will not work with Ansible because it represents strings internally as EBCDIC.
To get around this limitation, download and install a later version of `python for z/OS <https://www.rocketsoftware.com/zos-open-source>`_ (2.7.13 or 3.6.1) that represents strings internally as ASCII. Version 2.7.13 is verified to work.
* When ``pipelining = False`` is set in `/etc/ansible/ansible.cfg`, Ansible modules are transferred in binary mode via sftp; however, execution of python fails with
.. error::
SyntaxError: Non-UTF-8 code starting with \'\\x83\' in file /a/user1/.ansible/tmp/ansible-tmp-1548232945.35-274513842609025/AnsiballZ_stat.py on line 1, but no encoding declared; see https://python.org/dev/peps/pep-0263/ for details
To fix it set ``pipelining = True`` in `/etc/ansible/ansible.cfg`.
* The Python interpreter cannot be found in the default location ``/usr/bin/python`` on the target host.
.. error::
/usr/bin/python: EDC5129I No such file or directory
To fix this, set the path to the python installation in your inventory like so::
zos1 ansible_python_interpreter=/usr/lpp/python/python-2017-04-12-py27/python27/bin/python
* Start of python fails with ``The module libpython2.7.so was not found.``
.. error::
EE3501S The module libpython2.7.so was not found.
On z/OS, you must execute python from gnu bash. If gnu bash is installed at ``/usr/lpp/bash``, you can fix this in your inventory by specifying an ``ansible_shell_executable``::
zos1 ansible_shell_executable=/usr/lpp/bash/bin/bash
Running under fakeroot
----------------------
Some issues arise because ``fakeroot`` does not create a full, POSIX-compliant system by default.
It is known that it will not correctly expand the default tmp directory Ansible uses (:file:`~/.ansible/tmp`).
If you see module failures, this is likely the problem.
The simple workaround is to set ``remote_tmp`` to a path that will expand correctly (see documentation of the shell plugin you are using for specifics).
For example, in the ansible config file (or via environment variable) you can set::
remote_tmp=$HOME/.ansible/tmp
.. _use_roles:
What is the best way to make content reusable/redistributable?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
If you have not done so already, read all about "Roles" in the playbooks documentation. This helps you make playbook content
self-contained, and works well with things like git submodules for sharing content with others.
If some of these plugin types look strange to you, see the API documentation for more details about ways Ansible can be extended.
.. _configuration_file:
Where does the configuration file live and what can I configure in it?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
See :ref:`intro_configuration`.
.. _who_would_ever_want_to_disable_cowsay_but_ok_here_is_how:
How do I disable cowsay?
++++++++++++++++++++++++
If cowsay is installed, Ansible takes it upon itself to make your day happier when running playbooks. If you decide
that you would like to work in a professional cow-free environment, you can either uninstall cowsay, set ``nocows=1``
in ``ansible.cfg``, or set the :envvar:`ANSIBLE_NOCOWS` environment variable:
.. code-block:: shell-session
export ANSIBLE_NOCOWS=1
.. _browse_facts:
How do I see a list of all of the ansible\_ variables?
++++++++++++++++++++++++++++++++++++++++++++++++++++++
Ansible by default gathers "facts" about the machines under management, and these facts can be accessed in playbooks
and in templates. To see a list of all of the facts that are available about a machine, you can run the ``setup`` module
as an ad hoc action:
.. code-block:: shell-session
ansible -m setup hostname
This will print out a dictionary of all of the facts that are available for that particular host. You might want to pipe
the output to a pager. This does NOT include inventory variables or internal 'magic' variables. See the next question
if you need more than just 'facts'.
.. _browse_inventory_vars:
How do I see all the inventory variables defined for my host?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
By running the following command, you can see inventory variables for a host:
.. code-block:: shell-session
ansible-inventory --list --yaml
.. _browse_host_vars:
How do I see all the variables specific to my host?
+++++++++++++++++++++++++++++++++++++++++++++++++++
To see all host specific variables, which might include facts and other sources:
.. code-block:: shell-session
ansible -m debug -a "var=hostvars['hostname']" localhost
Unless you are using a fact cache, you normally need to run a play that gathers facts first so that the facts referenced in the task above are populated.
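A minimal sketch of such a play (the host pattern is illustrative):

.. code-block:: yaml

    - hosts: all
      gather_facts: true   # populates hostvars with facts before you inspect them
      tasks:
        - debug:
            var: hostvars[inventory_hostname]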
.. _host_loops:
How do I loop over a list of hosts in a group, inside of a template?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A pretty common pattern is to iterate over a list of hosts inside of a host group, perhaps to populate a template configuration
file with a list of servers. To do this, you can just access the ``groups`` dictionary in your template, like this:
.. code-block:: jinja
{% for host in groups['db_servers'] %}
{{ host }}
{% endfor %}
If you need to access facts about these hosts, for instance, the IP address of each hostname,
you need to make sure that the facts have been populated. For example, make sure you have a play that talks to db_servers::
    - hosts: db_servers
      tasks:
        - debug: msg="doesn't matter what you do, just that they were talked to previously."
Then you can use the facts inside your template, like this:
.. code-block:: jinja
{% for host in groups['db_servers'] %}
{{ hostvars[host]['ansible_eth0']['ipv4']['address'] }}
{% endfor %}
.. _programatic_access_to_a_variable:
How do I access a variable name programmatically?
+++++++++++++++++++++++++++++++++++++++++++++++++
An example may come up where we need to get the ipv4 address of an arbitrary interface, where the interface to be used may be supplied
via a role parameter or other input. Variable names can be built by adding strings together using "~", like so:
.. code-block:: jinja
{{ hostvars[inventory_hostname]['ansible_' ~ which_interface]['ipv4']['address'] }}
The trick about going through hostvars is necessary because it's a dictionary of the entire namespace of variables. ``inventory_hostname``
is a magic variable that indicates the current host you are looping over in the host loop.
In the example above, if your interface names have dashes, you must replace them with underscores:
.. code-block:: jinja
{{ hostvars[inventory_hostname]['ansible_' ~ which_interface | replace('-', '_') ]['ipv4']['address'] }}
Also see dynamic_variables_.
.. _access_group_variable:
How do I access a group variable?
+++++++++++++++++++++++++++++++++
Technically, you don't. Ansible does not really use groups directly. Groups are labels for host selection and a way to bulk assign variables;
they are not a first-class entity, and Ansible only cares about Hosts and Tasks.
That said, you can access a group's variable by selecting a host that is part of that group; see first_host_in_a_group_ below for an example.
.. _first_host_in_a_group:
How do I access a variable of the first host in a group?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
What happens if we want the ip address of the first webserver in the webservers group? Well, we can do that too. Note that if we
are using dynamic inventory, which host is the 'first' may not be consistent, so you wouldn't want to do this unless your inventory
is static and predictable. (If you are using AWX or the :ref:`Red Hat Ansible Automation Platform <ansible_platform>`, it will use database order, so this isn't a problem even if you are using cloud
based inventory scripts).
Anyway, here's the trick:
.. code-block:: jinja
{{ hostvars[groups['webservers'][0]]['ansible_eth0']['ipv4']['address'] }}
Notice how we're pulling out the hostname of the first machine of the webservers group. If you are doing this in a template, you
could use the Jinja2 ``{% set %}`` directive to simplify this, or in a playbook, you could also use set_fact::
- set_fact: headnode={{ groups['webservers'][0] }}
- debug: msg={{ hostvars[headnode].ansible_eth0.ipv4.address }}
Notice how we interchanged the bracket syntax for dots -- that can be done anywhere.
.. _file_recursion:
How do I copy files recursively onto a target host?
+++++++++++++++++++++++++++++++++++++++++++++++++++
The ``copy`` module has a recursive parameter. However, take a look at the ``synchronize`` module if you want to do something more efficient
for a large number of files. The ``synchronize`` module wraps rsync. See the module index for info on both of these modules.
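A short sketch of both approaches (the paths are hypothetical):

.. code-block:: yaml

    # copy recurses automatically when src is a directory;
    # a trailing slash copies the directory's contents
    - copy:
        src: files/app/
        dest: /opt/app/

    # synchronize wraps rsync and is usually faster for large trees
    - synchronize:
        src: files/app/
        dest: /opt/app/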
.. _shell_env:
How do I access shell environment variables?
++++++++++++++++++++++++++++++++++++++++++++
**On the controller machine:** To access existing environment variables on the controller, use the ``env`` lookup plugin.
For example, to access the value of the HOME environment variable on the management machine::
    ---
    # ...
    vars:
      local_home: "{{ lookup('env','HOME') }}"
**On target machines:** Environment variables are available via facts in the ``ansible_env`` variable:
.. code-block:: jinja
{{ ansible_env.HOME }}
If you need to set environment variables for TASK execution, see :ref:`playbooks_environment`
in the :ref:`Advanced Playbooks <playbooks_special_topics>` section.
There are several ways to set environment variables on your target machines. You can use the
:ref:`template <template_module>`, :ref:`replace <replace_module>`, or :ref:`lineinfile <lineinfile_module>`
modules to introduce environment variables into files. The exact files to edit vary depending on your OS
and distribution and local configuration.
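For example, a sketch using ``lineinfile`` to persist a variable in :file:`/etc/environment` (the file path, variable name, and value are illustrative and distribution-dependent):

.. code-block:: yaml

    - lineinfile:
        path: /etc/environment
        regexp: '^APP_ENV='          # hypothetical variable
        line: 'APP_ENV=production'
      become: yes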
.. _user_passwords:
How do I generate encrypted passwords for the user module?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Ansible ad hoc command is the easiest option:
.. code-block:: shell-session
ansible all -i localhost, -m debug -a "msg={{ 'mypassword' | password_hash('sha512', 'mysecretsalt') }}"
The ``mkpasswd`` utility that is available on most Linux systems is also a great option:
.. code-block:: shell-session
mkpasswd --method=sha-512
If this utility is not installed on your system (for example, you are using macOS) then you can still easily
generate these passwords using Python. First, ensure that the `Passlib <https://foss.heptapod.net/python-libs/passlib/-/wikis/home>`_
password hashing library is installed:
.. code-block:: shell-session
pip install passlib
Once the library is ready, SHA512 password values can then be generated as follows:
.. code-block:: shell-session
python -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))"
Use the integrated :ref:`hash_filters` to generate a hashed version of a password.
You shouldn't put plaintext passwords in your playbook or host_vars; instead, use :ref:`playbooks_vault` to encrypt sensitive data.
On OpenBSD, a similar utility called ``encrypt(1)`` is available in the base system.
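Putting it together, a sketch of feeding a hashed password to the ``user`` module (the username is illustrative, and ``my_vaulted_password`` stands in for a variable you would keep in Ansible Vault):

.. code-block:: yaml

    - user:
        name: testuser
        # hash the vaulted plaintext on the fly; never store plaintext in the play
        password: "{{ my_vaulted_password | password_hash('sha512', 'mysecretsalt') }}"
        state: present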
.. _dot_or_array_notation:
Ansible allows dot notation and array notation for variables. Which notation should I use?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The dot notation comes from Jinja and works fine for variables without special
characters. If your variable contains dots (.), colons (:), or dashes (-), if
a key begins and ends with two underscores, or if a key uses any of the known
public attributes, it is safer to use the array notation. See :ref:`playbooks_variables`
for a list of the known public attributes.
.. code-block:: jinja
item[0]['checksum:md5']
item['section']['2.1']
item['region']['Mid-Atlantic']
It is {{ temperature['Celsius']['-3'] }} outside.
Also array notation allows for dynamic variable composition, see dynamic_variables_.
Another problem with 'dot notation' is that some keys can cause problems because they collide with attributes and methods of python dictionaries.
.. code-block:: jinja
item.update # this breaks if item is a dictionary, as 'update()' is a python method for dictionaries
item['update'] # this works
.. _argsplat_unsafe:
When is it unsafe to bulk-set task arguments from a variable?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You can set all of a task's arguments from a dictionary-typed variable. This
technique can be useful in some dynamic execution scenarios. However, it
introduces a security risk. We do not recommend it, so Ansible issues a
warning when you do something like this::
    #...
    vars:
      usermod_args:
        name: testuser
        state: present
        update_password: always
    tasks:
      - user: '{{ usermod_args }}'
This particular example is safe. However, constructing tasks like this is
risky because the parameters and values passed to ``usermod_args`` could
be overwritten by malicious values in the ``host facts`` on a compromised
target machine. To mitigate this risk:
* set bulk variables at a level of precedence greater than ``host facts`` in the order of precedence
found in :ref:`ansible_variable_precedence` (the example above is safe because play vars take
precedence over facts)
* disable the :ref:`inject_facts_as_vars` configuration setting to prevent fact values from colliding
with variables (this will also disable the original warning)
.. _commercial_support:
Can I get training on Ansible?
++++++++++++++++++++++++++++++
Yes! See our `services page <https://www.ansible.com/products/consulting>`_ for information on our services
and training offerings. Email `[email protected] <mailto:[email protected]>`_ for further details.
We also offer free web-based training classes on a regular basis. See our
`webinar page <https://www.ansible.com/resources/webinars-training>`_ for more info on upcoming webinars.
.. _web_interface:
Is there a web interface / REST API / GUI?
++++++++++++++++++++++++++++++++++++++++++++
Yes! The open-source web interface is Ansible AWX. The supported Red Hat product that makes Ansible even more powerful and easy to use is :ref:`Red Hat Ansible Automation Platform <ansible_platform>`.
.. _keep_secret_data:
How do I keep secret data in my playbook?
+++++++++++++++++++++++++++++++++++++++++
If you would like to keep secret data in your Ansible content and still share it publicly or keep things in source control, see :ref:`playbooks_vault`.
If you have a task that you don't want to show the results or command given to it when using -v (verbose) mode, the following task or playbook attribute can be useful::
    - name: secret task
      shell: /usr/bin/do_something --value={{ secret_value }}
      no_log: True
This can be used to keep verbose output but hide sensitive information from others who would otherwise like to be able to see the output.
The ``no_log`` attribute can also apply to an entire play::
    - hosts: all
      no_log: True
Though this will make the play somewhat difficult to debug. It's recommended that this
be applied to single tasks only, once a playbook is completed. Note that the use of the
``no_log`` attribute does not prevent data from being shown when debugging Ansible itself via
the :envvar:`ANSIBLE_DEBUG` environment variable.
.. _when_to_use_brackets:
.. _dynamic_variables:
.. _interpolate_variables:
When should I use {{ }}? Also, how to interpolate variables or dynamic variable names
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A steadfast rule is 'always use ``{{ }}`` except when ``when:``'.
Conditionals are always run through Jinja2 to resolve the expression,
so ``when:``, ``failed_when:`` and ``changed_when:`` are always templated and you should avoid adding ``{{ }}``.
In most other cases you should always use the brackets, even if previously you could use variables without
specifying (like ``loop`` or ``with_`` clauses), as this made it hard to distinguish between an undefined variable and a string.
Another rule is 'moustaches don't stack'. We often see this:
.. code-block:: jinja
{{ somevar_{{other_var}} }}
The above DOES NOT WORK as you expect. If you need to use a dynamic variable, use one of the following as appropriate:
.. code-block:: jinja
{{ hostvars[inventory_hostname]['somevar_' ~ other_var] }}
For 'non host vars' you can use the :ref:`vars lookup<vars_lookup>` plugin:
.. code-block:: jinja
{{ lookup('vars', 'somevar_' ~ other_var) }}
To determine if a keyword requires ``{{ }}`` or even supports templating, use ``ansible-doc -t keyword <name>``;
this will return documentation on the keyword including a ``template`` field with the values ``explicit`` (requires ``{{ }}``),
``implicit`` (assumes ``{{ }}``, so none is needed) or ``static`` (no templating supported, all characters will be interpreted literally).
.. _why_no_wheel:
Why don't you ship ansible in wheel format (or other packaging format) ?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
In most cases it has to do with maintainability. There are many ways to ship software and we do not have
the resources to release Ansible on every platform.
In some cases there are technical issues. For example, some of our dependencies are not available as Python wheels.
.. _ansible_host_delegated:
How do I get the original ansible_host when I delegate a task?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
As the documentation states, connection variables are taken from the ``delegate_to`` host so ``ansible_host`` is overwritten,
but you can still access the original via ``hostvars``::
original_host: "{{ hostvars[inventory_hostname]['ansible_host'] }}"
This works for all overridden connection variables, like ``ansible_user``, ``ansible_port``, and so on.
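A short sketch (the host pattern and jump host are hypothetical):

.. code-block:: yaml

    - hosts: app_servers
      tasks:
        - name: run a step elsewhere but keep the original host's address handy
          debug:
            msg: "original host was {{ hostvars[inventory_hostname]['ansible_host'] | default(inventory_hostname) }}"
          delegate_to: bastion.example.com   # hypothetical jump host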
.. _scp_protocol_error_filename:
How do I fix 'protocol error: filename does not match request' when fetching a file?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Since release ``7.9p1`` of OpenSSH there is a `bug <https://bugzilla.mindrot.org/show_bug.cgi?id=2966>`_
in the SCP client that can trigger this error on the Ansible controller when using SCP as the file transfer mechanism::
failed to transfer file to /tmp/ansible/file.txt\r\nprotocol error: filename does not match request
In these releases, SCP tries to validate that the path of the file to fetch matches the requested path. The validation
fails if the remote filename requires quotes to escape spaces or non-ascii characters in its path. To avoid this error:
* Use SFTP instead of SCP by setting ``scp_if_ssh`` to ``smart`` (which tries SFTP first) or to ``False``. You can do this in one of five ways (a sketch of the inventory-variable route follows this list):

  * Rely on the default setting, which is ``smart`` - this works if ``scp_if_ssh`` is not explicitly set anywhere
  * Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>` in inventory: ``ansible_scp_if_ssh: False``
  * Set an environment variable on your control node: ``export ANSIBLE_SCP_IF_SSH=False``
  * Pass an environment variable when you run Ansible: ``ANSIBLE_SCP_IF_SSH=smart ansible-playbook``
  * Modify your ``ansible.cfg`` file: add ``scp_if_ssh=False`` to the ``[ssh_connection]`` section

* If you must use SCP, set the ``-T`` arg to tell the SCP client to ignore path validation. You can do this in one of three ways:

  * Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>`: ``ansible_scp_extra_args=-T``
  * Export or pass an environment variable: ``ANSIBLE_SCP_EXTRA_ARGS=-T``
  * Modify your ``ansible.cfg`` file: add ``scp_extra_args=-T`` to the ``[ssh_connection]`` section
.. note:: If you see an ``invalid argument`` error when using ``-T``, then your SCP client is not performing filename validation and will not trigger this error.
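As a sketch, the inventory-variable route for either option could look like this (the group file name is hypothetical):

.. code-block:: yaml

    # group_vars/legacy_scp_hosts.yml -- hypothetical file
    ansible_scp_if_ssh: False       # prefer SFTP over SCP
    # or, if SCP must be used, skip the client-side path validation:
    # ansible_scp_extra_args: -T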
.. _mfa_support:
Does Ansible support multiple factor authentication 2FA/MFA/biometrics/fingerprint/usbkey/OTP/...
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
No, Ansible is designed to execute multiple tasks against multiple targets, minimizing user interaction.
As with most automation tools, it is not compatible with interactive security systems designed to handle human interaction.
Most of these systems require a secondary prompt per target, which prevents scaling to thousands of targets. They also
tend to have very short expiration periods, so they require frequent reauthorization, which is also an issue with many hosts and/or
a long set of tasks.
In such environments we recommend securing around Ansible's execution but still allowing it to use an 'automation user' that does not require such measures.
With AWX or the :ref:`Red Hat Ansible Automation Platform <ansible_platform>`, administrators can set up RBAC access to inventory, along with managing credentials and job execution.
.. _complex_configuration_validation:
The 'validate' option is not enough for my needs, what do I do?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Many Ansible modules that create or update files have a ``validate`` option that allows you to abort the update if the validation command fails.
This uses the temporary file Ansible creates before doing the final update. In many cases this does not work since the validation tools
for the specific application require either specific names, multiple files or some other factor that is not present in this simple feature.
For these cases you have to handle the validation and restoration yourself. The following is a simple example of how to do this with block/rescue
and backups, which most file based modules also support:
.. code-block:: yaml

    - name: update config and backout if validation fails
      block:
        - name: do the actual update, works with copy, lineinfile and any action that allows for `backup`.
          template: src=template.j2 dest=/x/y/z backup=yes moreoptions=stuff
          register: updated

        - name: run validation, this will change a lot as needed. We assume it returns an error when not passing, use `failed_when` if otherwise.
          shell: run_validation_command
          become: yes
          become_user: requiredbyapp
          environment:
            WEIRD_REQUIREMENT: 1
      rescue:
        - name: restore backup file to original, in the hope the previous configuration was working.
          copy:
            remote_src: yes
            dest: /x/y/z
            src: "{{ updated['backup_file'] }}"
      always:
        - name: We choose to always delete backup, but could copy or move, or only delete in rescue.
          file:
            path: "{{ updated['backup_file'] }}"
            state: absent
.. _jinja2_faqs:
Why does the ``regex_search`` filter return `None` instead of an empty string?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Until the jinja2 2.10 release, Jinja was only able to return strings, but Ansible needed Python objects in some cases. Ansible uses ``safe_eval`` and only sends strings that look like certain types of Python objects through this function. With ``regex_search`` that does not find a match, the result (``None``) is converted to the string "None" which is not useful in non-native jinja2.
The following example of a single templating action shows this behavior:
.. code-block:: Jinja
{{ 'ansible' | regex_search('foobar') }}
This example does not result in a Python ``None``, so Ansible historically converted it to "" (empty string).
The native jinja2 functionality actually allows us to return full Python objects, that are always represented as Python objects everywhere, and as such the result of a single templating action with ``regex_search`` can result in the Python ``None``.
.. note::
Native jinja2 functionality is not needed when ``regex_search`` is used as an intermediate result that is then compared to the jinja2 ``none`` test.
.. code-block:: Jinja
{{ 'ansible' | regex_search('foobar') is none }}
.. _docs_contributions:
How do I submit a change to the documentation?
++++++++++++++++++++++++++++++++++++++++++++++
Documentation for Ansible is kept in the main project git repository, and complete instructions
for contributing can be found in the docs README `viewable on GitHub <https://github.com/ansible/ansible/blob/devel/docs/docsite/README.md>`_. Thanks!
.. _i_dont_see_my_question:
I don't see my question here
++++++++++++++++++++++++++++
If you have not found an answer to your questions, you can ask on one of our mailing lists or chat channels. For instructions on subscribing to a list or joining a chat channel, see :ref:`communication`.
.. seealso::
:ref:`working_with_playbooks`
An introduction to playbooks
:ref:`playbooks_best_practices`
Tips and tricks for playbooks
`User Mailing List <https://groups.google.com/group/ansible-project>`_
Have a question? Stop by the google group!
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,347 |
Clarify in docs that Community Galaxy cannot provide signatures
|
### Summary
The documentation seems to imply that the community Galaxy server can supply signatures, which it can't. Clarify that only AH and self-hosted pulp or on-prem hub servers will have signatures available:
https://github.com/ansible/ansible/pull/78326/files#diff-3147c791e9e90d93c0f1468a3d5769f74d598bf69ed68f3553544454d36b3fc5R276-R285
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/user_guide/collections_using.rst
### Ansible Version
```console
$ ansible --version
2.13
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
none
```
### OS / Environment
none
### Additional Information
Fix needs to be backported to 2.13 as well
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78347
|
https://github.com/ansible/ansible/pull/78386
|
ccf1a631585149c612250f466044f88b43518393
|
7ae1eabaa41080fc97e0155919471f57dba23825
| 2022-07-25T19:53:08Z |
python
| 2022-08-16T20:05:06Z |
docs/docsite/rst/dev_guide/developing_collections_distributing.rst
|
.. _distributing_collections:
************************
Distributing collections
************************
A collection is a distribution format for Ansible content. A typical collection contains modules and other plugins that address a set of related use cases. For example, a collection might automate administering a particular database. A collection can also contain roles and playbooks.
To distribute your collection and allow others to use it, you can publish your collection on one or more distribution servers. Distribution servers include:
================================= ========================================================
Distribution server Collections accepted
================================= ========================================================
Ansible Galaxy All collections
Red Hat Automation Hub Only collections certified by Red Hat
Privately hosted Automation Hub Collections authorized by the owners
================================= ========================================================
Distributing collections involves four major steps:
#. Initial configuration of your distribution server or servers
#. Building your collection tarball
#. Preparing to publish your collection
#. Publishing your collection
.. contents::
:local:
:depth: 2
.. _config_distribution_server:
Initial configuration of your distribution server or servers
============================================================
Configure a connection to one or more distribution servers so you can publish collections there. You only need to configure each distribution server once. You must repeat the other steps (building your collection tarball, preparing to publish, and publishing your collection) every time you publish a new collection or a new version of an existing collection.
1. Create a namespace on each distribution server you want to use.
2. Get an API token for each distribution server you want to use.
3. Specify the API token for each distribution server you want to use.
.. _get_namespace:
Creating a namespace
--------------------
You must upload your collection into a namespace on each distribution server. If you have a login for Ansible Galaxy, your Ansible Galaxy username is usually also an Ansible Galaxy namespace.
.. warning::
Namespaces on Ansible Galaxy cannot include hyphens. If you have a login for Ansible Galaxy that includes a hyphen, your Galaxy username is not also a Galaxy namespace. For example, ``awesome-user`` is a valid username for Ansible Galaxy, but it is not a valid namespace.
You can create additional namespaces on Ansible Galaxy if you choose. For Red Hat Automation Hub and private Automation Hub you must create a namespace before you can upload your collection. To create a namespace:
* To create a namespace on Galaxy, see `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for details.
* To create a namespace on Red Hat Automation Hub, see the `Ansible Certified Content FAQ <https://access.redhat.com/articles/4916901>`_.
Specify the namespace in the :file:`galaxy.yml` file for each collection. For more information on the :file:`galaxy.yml` file, see :ref:`collections_galaxy_meta`.
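A minimal :file:`galaxy.yml` sketch (the namespace, name, and author values are placeholders):

.. code-block:: yaml

    namespace: my_namespace
    name: my_collection
    version: 1.0.0
    readme: README.md
    authors:
      - Your Name <[email protected]>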
.. _galaxy_get_token:
Getting your API token
----------------------
An API token authenticates your connection to each distribution server. You need a separate API token for each distribution server. Use the correct API token to connect to each distribution server securely and protect your content.
To get your API token:
* To get an API token for Galaxy, go to the `Galaxy profile preferences <https://galaxy.ansible.com/me/preferences>`_ page and click :guilabel:`API Key`.
* To get an API token for Automation Hub, go to `the token page <https://cloud.redhat.com/ansible/automation-hub/token/>`_ and click :guilabel:`Load token`.
.. _galaxy_specify_token:
Specifying your API token and distribution server
-------------------------------------------------
Each time you publish a collection, you must specify the API token and the distribution server to create a secure connection. You have two options for specifying the token and distribution server:
* You can configure the token in configuration, as part of a ``galaxy_server_list`` entry in your :file:`ansible.cfg` file. Using configuration is the most secure option.
* You can pass the token at the command line as an argument to the ``ansible-galaxy`` command. If you pass the token at the command line, you can specify the server at the command line, by using the default setting, or by setting the server in configuration. Passing the token at the command line is insecure, because typing secrets at the command line may expose them to other users on the system.
.. _galaxy_token_ansible_cfg:
Specifying the token and distribution server in configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, Ansible Galaxy is configured as the only distribution server. You can add other distribution servers and specify your API token or tokens in configuration by editing the ``galaxy_server_list`` section of your :file:`ansible.cfg` file. This is the most secure way to manage authentication for distribution servers. Specify a URL and token for each server. For example:
.. code-block:: ini
[galaxy]
server_list = release_galaxy
[galaxy_server.release_galaxy]
url=https://galaxy.ansible.com/
token=abcdefghijklmnopqrtuvwxyz
You cannot use ``--api-key`` with any servers defined in your :ref:`galaxy_server_list <galaxy_server_config>`. See :ref:`galaxy_server_config` for complete details.
.. _galaxy_use_token_arg:
Specifying the token at the command line
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can specify the API token at the command line using the ``--token`` argument of the :ref:`ansible-galaxy` command. There are three ways to specify the distribution server when passing the token at the command line:
* using the ``--server`` argument of the :ref:`ansible-galaxy` command
* relying on the default (https://galaxy.ansible.com)
* setting a server in configuration by creating a :ref:`GALAXY_SERVER` setting in your :file:`ansible.cfg` file
For example:
.. code-block:: bash
ansible-galaxy collection publish path/to/my_namespace-my_collection-1.0.0.tar.gz --token abcdefghijklmnopqrtuvwxyz
.. warning::
Using the ``--token`` argument is insecure. Passing secrets at the command line may expose them to others on the system.
.. _building_collections:
Building your collection tarball
================================
After configuring one or more distribution servers, build a collection tarball. The collection tarball is the published artifact, the object that you upload and other users download to install your collection. To build a collection tarball:
#. Review the version number in your :file:`galaxy.yml` file. Each time you publish your collection, it must have a new version number. You cannot make changes to existing versions of your collection on a distribution server. If you try to upload the same collection version more than once, the distribution server returns the error ``Code: conflict.collection_exists``. Collections follow semantic versioning rules. For more information on versions, see :ref:`collection_versions`. For more information on the :file:`galaxy.yml` file, see :ref:`collections_galaxy_meta`.
#. Run ``ansible-galaxy collection build`` from inside the top-level directory of the collection. For example:
.. code-block:: bash
collection_dir#> ansible-galaxy collection build
This command builds a tarball of the collection in the current directory, which you can upload to your selected distribution server::
my_collection/
├── galaxy.yml
├── ...
├── my_namespace-my_collection-1.0.0.tar.gz
└── ...
.. note::
* To reduce the size of collections, certain files and folders are excluded from the collection tarball by default. See :ref:`ignoring_files_and_folders_collections` if your collection directory contains other files you want to exclude.
* The current Galaxy maximum tarball size is 2 MB.
You can upload your tarball to one or more distribution servers. You can also distribute your collection locally by copying the tarball to install your collection directly on target systems.
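For example, a minimal sketch of that local workflow, assuming a hypothetical target host named ``target01``:
.. code-block:: bash
# Copy the built tarball to the target system and install it there directly
scp my_namespace-my_collection-1.0.0.tar.gz target01:/tmp/
ssh target01 ansible-galaxy collection install /tmp/my_namespace-my_collection-1.0.0.tar.gz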
.. _ignoring_files_and_folders_collections:
Ignoring files and folders
--------------------------
By default, the build step includes all the files in the collection directory in the tarball except for the following:
* ``galaxy.yml``
* ``*.pyc``
* ``*.retry``
* ``tests/output``
* previously built tarballs in the root directory
* various version control directories such as ``.git/``
To exclude other files and folders from your collection tarball, set a list of file glob-like patterns in the ``build_ignore`` key in the collection's ``galaxy.yml`` file. These patterns use the following special characters for wildcard matching:
* ``*``: Matches everything
* ``?``: Matches any single character
* ``[seq]``: Matches any character in sequence
* ``[!seq]``: Matches any character not in sequence
For example, to exclude the :file:`sensitive` folder within the ``playbooks`` folder as well as any ``.tar.gz`` archives, set the following in your ``galaxy.yml`` file:
.. code-block:: yaml
build_ignore:
- playbooks/sensitive
- '*.tar.gz'
For more information on the :file:`galaxy.yml` file, see :ref:`collections_galaxy_meta`.
.. note::
The ``build_ignore`` feature is only supported with ``ansible-galaxy collection build`` in Ansible 2.10 or newer.
.. _trying_collection_locally:
Preparing to publish your collection
====================================
Each time you publish your collection, you must create a :ref:`new version <collection_versions>` on the distribution server. After you publish a version of a collection, you cannot delete or modify that version. To avoid unnecessary extra versions, check your collection for bugs, typos, and other issues locally before publishing:
#. Install the collection locally.
#. Review the locally installed collection before publishing a new version.
Installing your collection locally
----------------------------------
You have two options for installing your collection locally:
* Install your collection locally from the tarball.
* Install your collection locally from your git repository.
Installing your collection locally from the tarball
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To install your collection locally from the tarball, run ``ansible-galaxy collection install`` and specify the collection tarball. You can optionally specify a location using the ``-p`` flag. For example:
.. code-block:: bash
collection_dir#> ansible-galaxy collection install my_namespace-my_collection-1.0.0.tar.gz -p ./collections
Install the tarball into a directory configured in :ref:`COLLECTIONS_PATHS` so Ansible can easily find and load the collection. If you do not specify a path value, ``ansible-galaxy collection install`` installs the collection in the first path defined in :ref:`COLLECTIONS_PATHS`.
.. _collections_scm_install:
Installing your collection locally from a git repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To install your collection locally from a git repository, specify the repository and the branch you want to install:
.. code-block:: bash
collection_dir#> ansible-galaxy collection install git+https://github.com/org/repo.git,devel
.. include:: ../shared_snippets/installing_collections_git_repo.txt
Reviewing your collection
-------------------------
Review the collection:
* Run a playbook that uses the modules and plugins in your collection. Verify that new features and functionality work as expected. A minimal smoke test is sketched after this list. For examples and more details see :ref:`Using collections <using_collections>`.
* Check the documentation for typos.
* Check that the version number of your tarball is higher than the latest published version on the distribution server or servers.
* If you find any issues, fix them and rebuild the collection tarball.
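For example, a minimal smoke test of a locally installed collection, assuming a hypothetical module ``my_namespace.my_collection.mymodule`` (substitute a real module from your collection):
.. code-block:: bash
# Run the module ad hoc against localhost to confirm the installed collection loads
ansible localhost -c local -m my_namespace.my_collection.mymodule -a 'option1=value'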
.. _collection_versions:
Understanding collection versioning
-----------------------------------
The only way to change a collection is to release a new version. The latest version of a collection (by highest version number) is the version displayed everywhere in Galaxy and Automation Hub. Users can still download older versions.
Follow semantic versioning when setting the version for your collection. In summary:
* Increment the major version number, ``x`` of ``x.y.z``, for an incompatible API change.
* Increment the minor version number, ``y`` of ``x.y.z``, for new functionality in a backwards compatible manner (for example new modules/plugins, parameters, return values).
* Increment the patch version number, ``z`` of ``x.y.z``, for backwards compatible bug fixes.
Read the official `Semantic Versioning <https://semver.org/>`_ documentation for details and examples.
.. _publish_collection:
Publishing your collection
==========================
The last step in distributing your collection is publishing the tarball to Ansible Galaxy, Red Hat Automation Hub, or a privately hosted Automation Hub instance. You can publish your collection in two ways:
* from the command line using the ``ansible-galaxy collection publish`` command
* from the website of the distribution server (Galaxy, Automation Hub) itself
.. _upload_collection_ansible_galaxy:
.. _publish_collection_galaxy_cmd:
Publishing a collection from the command line
---------------------------------------------
To upload the collection tarball from the command line using ``ansible-galaxy``:
.. code-block:: bash
ansible-galaxy collection publish path/to/my_namespace-my_collection-1.0.0.tar.gz
.. note::
This ansible-galaxy command assumes you have retrieved and stored your API token in configuration. See :ref:`galaxy_specify_token` for details.
The ``ansible-galaxy collection publish`` command triggers an import process, just as if you uploaded the collection through the Galaxy website. The command waits until the import process completes before reporting the status back. If you want to continue without waiting for the import result, use the ``--no-wait`` argument and manually look at the import progress in your `My Imports <https://galaxy.ansible.com/my-imports/>`_ page.
.. _upload_collection_galaxy:
Publishing a collection from the website
----------------------------------------
To publish your collection directly on the Galaxy website:
#. Go to the `My Content <https://galaxy.ansible.com/my-content/namespaces>`_ page, and click the **Add Content** button on one of your namespaces.
#. From the **Add Content** dialogue, click **Upload New Collection**, and select the collection archive file from your local filesystem.
When you upload a collection, Ansible always uploads the tarball to the namespace specified in the collection metadata in the ``galaxy.yml`` file, no matter which namespace you select on the website. If you are not an owner of the namespace specified in your collection metadata, the upload request fails.
After Galaxy uploads and accepts a collection, the website shows you the **My Imports** page. This page shows import process information. You can review any errors or warnings about your upload there.
.. seealso::
:ref:`collections`
Learn how to install and use collections.
:ref:`collections_galaxy_meta`
Table of fields used in the :file:`galaxy.yml` file
`Mailing List <https://groups.google.com/group/ansible-devel>`_
The development mailing list
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,347 |
Clarify in docs that Community Galaxy cannot provide signatures
|
### Summary
The documentation seems to imply that the community Galaxy server can supply signatures, which it can't. Clarify that only AH and self-hosted pulp or on-prem hub servers will have signatures available:
https://github.com/ansible/ansible/pull/78326/files#diff-3147c791e9e90d93c0f1468a3d5769f74d598bf69ed68f3553544454d36b3fc5R276-R285
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/user_guide/collections_using.rst
### Ansible Version
```console
$ ansible --version
2.13
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
none
```
### OS / Environment
none
### Additional Information
Fix needs to be backported to 2.13 as well
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78347
|
https://github.com/ansible/ansible/pull/78386
|
ccf1a631585149c612250f466044f88b43518393
|
7ae1eabaa41080fc97e0155919471f57dba23825
| 2022-07-25T19:53:08Z |
python
| 2022-08-16T20:05:06Z |
docs/docsite/rst/reference_appendices/glossary.rst
|
Glossary
========
The following is a list (and re-explanation) of term definitions used elsewhere in the Ansible documentation.
Consult the documentation home page for the full documentation and to see the terms in context, but this should be a good resource
to check your knowledge of Ansible's components and understand how they fit together. It's something you might wish to read for review or
when a term comes up on the mailing list.
.. glossary::
Action
An action is a part of a task that specifies which of the modules to
run and which arguments to pass to that module. Each task can have
only one action, but it may also have other parameters.
Ad Hoc
Refers to running Ansible to perform some quick command, using
:command:`/usr/bin/ansible`, rather than the :term:`orchestration`
language, which is :command:`/usr/bin/ansible-playbook`. An example
of an ad hoc command might be rebooting 50 machines in your
infrastructure. Anything you can do ad hoc can be accomplished by
writing a :term:`playbook <playbooks>` and playbooks can also glue
lots of other operations together.
Ansible (the package)
A software package (Python, deb, rpm, and so on) that contains ansible-core and a select group of collections. Playbooks that worked with Ansible 2.9 should still work with the Ansible 2.10 package. See the :file:`ansible-<version>.build` file in the release-specific directory at `ansible-build-data <https://github.com/ansible-community/ansible-build-data>`_ for a list of collections included in Ansible, as well as the included ``ansible-core`` version.
ansible-base
Used only for 2.10. The installable package (RPM/Python/Deb package) generated from the `ansible/ansible repository <https://github.com/ansible/ansible>`_. See ``ansible-core``.
ansible-core
Name used starting with 2.11. The installable package (RPM/Python/Deb package) generated from the `ansible/ansible repository <https://github.com/ansible/ansible>`_. Contains the command-line tools and the code for basic features and functions, such as copying module code to managed nodes. The ``ansible-core`` package includes a few modules and plugins and allows you to add others by installing collections.
Ansible Galaxy
An `online resource <https://galaxy.ansible.com>`_ for finding and sharing Ansible community content. Also, the command-line utility that lets users install individual Ansible Collections, for example ``ansible-galaxy collection install community.crypto``.
Async
Refers to a task that is configured to run in the background rather
than waiting for completion. If you have a long process that would
run longer than the SSH timeout, it would make sense to launch that
task in async mode. Async modes can poll for completion every so many
seconds or can be configured to "fire and forget", in which case
Ansible will not even check on the task again; it will just kick it
off and proceed to future steps. Async modes work with both
:command:`/usr/bin/ansible` and :command:`/usr/bin/ansible-playbook`.
Callback Plugin
Refers to some user-written code that can intercept results from
Ansible and do something with them. Some supplied examples in the
GitHub project perform custom logging, send email, or even play sound
effects.
Check Mode
Refers to running Ansible with the ``--check`` option, which does not
make any changes on the remote systems, but only outputs the changes
that might occur if the command ran without this flag. This is
analogous to so-called "dry run" modes in other systems, though the
user should be warned that this does not take into account unexpected
command failures or cascade effects (which is true of similar modes in
other systems). Use this to get an idea of what might happen, but do
not substitute it for a good staging environment.
Collection
A packaging format for bundling and distributing Ansible content, including plugins, roles, modules, and more. Collections release independently of other collections or ``ansible-core`` so features can be available sooner to users. Some collections are packaged with Ansible (version 2.10 or later). You can install other collections (or other versions of collections) with ``ansible-galaxy collection install <namespace.collection>``.
Collection name
The second part of a Fully Qualified Collection Name. The collection name divides the collection namespace and usually reflects the function of the collection content. For example, the ``cisco`` namespace might contain ``cisco.ios``, ``cisco.aci``, and ``cisco.nxos``, with content for managing the different network devices maintained by Cisco.
community.general (collection)
A special collection managed by the Ansible Community Team containing all the modules and plugins which shipped in Ansible 2.9 that do not have their own dedicated Collection. See `community.general <https://galaxy.ansible.com/community/general>`_ on Galaxy.
community.network (collection)
Similar to ``community.general``, focusing on network content. `community.network <https://galaxy.ansible.com/community/network>`_ on Galaxy.
Connection Plugin
By default, Ansible talks to remote machines through pluggable
libraries. Ansible uses native OpenSSH (:term:`SSH (Native)`) or
a Python implementation called :term:`paramiko`. OpenSSH is preferred
if you are using a recent version, and also enables some features like
Kerberos and jump hosts. This is covered in the :ref:`getting
started section <remote_connection_information>`. There are also
other connection types like ``accelerate`` mode, which must be
bootstrapped over one of the SSH-based connection types but is very
fast, and local mode, which acts on the local system. Users can also
write their own connection plugins.
Conditionals
A conditional is an expression that evaluates to true or false that
decides whether a given task is executed on a given machine or not.
Ansible's conditionals are powered by the 'when' statement, which are
discussed in the :ref:`working_with_playbooks`.
Declarative
An approach to achieving a task that uses a description of the
final state rather than a description of the sequence of steps
necessary to achieve that state. For a real world example, a
declarative specification of a task would be: "put me in California".
Depending on your current location, the sequence of steps to get you to
California may vary, and if you are already in California, nothing
at all needs to be done. Ansible's Resources are declarative; it
figures out the steps needed to achieve the final state. It also lets
you know whether or not any steps needed to be taken to get to the
final state.
Diff Mode
A ``--diff`` flag can be passed to Ansible to show what changed on
modules that support it. You can combine it with ``--check`` to get a
good 'dry run'. File diffs are normally in unified diff format.
Executor
A core software component of Ansible that is the power behind
:command:`/usr/bin/ansible` directly -- and corresponds to the
invocation of each task in a :term:`playbook <playbooks>`. The
Executor is something Ansible developers may talk about, but it's not
really user land vocabulary.
Facts
Facts are simply things that are discovered about remote nodes. While
they can be used in :term:`playbooks` and templates just like
variables, facts are things that are inferred, rather than set. Facts
are automatically discovered by Ansible when running plays by
executing the internal :ref:`setup module <setup_module>` on the remote nodes. You
never have to call the setup module explicitly, it just runs, but it
can be disabled to save time if it is not needed or you can tell
ansible to collect only a subset of the full facts via the
``gather_subset:`` option. For the convenience of users who are
switching from other configuration management systems, the fact module
will also pull in facts from the :program:`ohai` and :program:`facter`
tools if they are installed. These are fact libraries from Chef and
Puppet, respectively. (These may also be disabled via
``gather_subset:``)
Filter Plugin
A filter plugin is something that most users will never need to
understand. These allow for the creation of new :term:`Jinja2`
filters, which are more or less only of use to people who know what
Jinja2 filters are. If you need them, you can learn how to write them
in the :ref:`API docs section <developing_filter_plugins>`.
Forks
Ansible talks to remote nodes in parallel and the level of parallelism
can be set either by passing ``--forks`` or editing the default in
a configuration file. The default is a very conservative five (5)
forks, though if you have a lot of RAM, you can easily set this to
a value like 50 for increased parallelism.
Fully Qualified Collection Name (FQCN)
The full definition of a module, plugin, or role hosted within a collection, in the form <namespace.collection.content_name>. Allows a Playbook to refer to a specific module or plugin from a specific source in an unambiguous manner, for example, ``community.grafana.grafana_dashboard``. The FQCN is required when you want to specify the exact source of a plugin. For example, if multiple collections contain a module plugin called ``user``, the FQCN specifies which one to use for a given task. When you have multiple collections installed, the FQCN is always the explicit and authoritative indicator of which collection to search for the correct plugin for each task.
Gather Facts (Boolean)
:term:`Facts` are mentioned above. Sometimes when running a multi-play
:term:`playbook <playbooks>`, it is desirable to have some plays that
don't bother with fact computation if they aren't going to need to
utilize any of these values. Setting ``gather_facts: False`` on
a playbook allows this implicit fact gathering to be skipped.
Globbing
Globbing is a way to select lots of hosts based on wildcards, rather
than the name of the host specifically, or the name of the group they
are in. For instance, it is possible to select ``www*`` to match all
hosts starting with ``www``. This concept is pulled directly from
:program:`Func`, one of Michael DeHaan's (an Ansible Founder) earlier
projects. In addition to basic globbing, various set operations are
also possible, such as 'hosts in this group and not in another group',
and so on.
Group
A group consists of several hosts assigned to a pool that can be
conveniently targeted together, as well as given variables that they
share in common.
Group Vars
The :file:`group_vars/` files are files that live in a directory
alongside an inventory file, with an optional filename named after
each group. This is a convenient place to put variables that are
provided to a given group, especially complex data structures, so that
these variables do not have to be embedded in the :term:`inventory`
file or :term:`playbook <playbooks>`.
Handlers
Handlers are just like regular tasks in an Ansible
:term:`playbook <playbooks>` (see :term:`Tasks`) but are only run if
the Task contains a ``notify`` keyword and also indicates that it
changed something. For example, if a config file is changed, then the
task referencing the config file templating operation may notify
a service restart handler. This means services can be bounced only if
they need to be restarted. Handlers can be used for things other than
service restarts, but service restarts are the most common usage.
Host
A host is simply a remote machine that Ansible manages. They can have
individual variables assigned to them, and can also be organized in
groups. All hosts have a name they can be reached at (which is either
an IP address or a domain name) and, optionally, a port number, if they
are not to be accessed on the default SSH port.
Host Specifier
Each :term:`Play <plays>` in Ansible maps a series of :term:`tasks` (which define the role,
purpose, or orders of a system) to a set of systems.
This ``hosts:`` keyword in each play is often called the hosts specifier.
It may select one system, many systems, one or more groups, or even
some hosts that are in one group and explicitly not in another.
Host Vars
Just like :term:`Group Vars`, a directory alongside the inventory file named
:file:`host_vars/` can contain a file named after each hostname in the
inventory file, in :term:`YAML` format. This provides a convenient place to
assign variables to the host without having to embed them in the
:term:`inventory` file. The Host Vars file can also be used to define complex
data structures that can't be represented in the inventory file.
Idempotency
An operation is idempotent if the result of performing it once is
exactly the same as the result of performing it repeatedly without
any intervening actions.
Includes
The idea that :term:`playbook <playbooks>` files (which are nothing
more than lists of :term:`plays`) can include other lists of plays,
and task lists can externalize lists of :term:`tasks` in other files,
and similarly with :term:`handlers`. Includes can be parameterized,
which means that the loaded file can pass variables. For instance, an
included play for setting up a WordPress blog may take a parameter
called ``user`` and that play could be included more than once to
create a blog for both ``alice`` and ``bob``.
Inventory
A file (by default, Ansible uses a simple INI format) that describes
:term:`Hosts <Host>` and :term:`Groups <Group>` in Ansible. Inventory
can also be provided via an :term:`Inventory Script` (sometimes called
an "External Inventory Script").
Inventory Script
A very simple program (or a complicated one) that looks up
:term:`hosts <Host>`, :term:`group` membership for hosts, and variable
information from an external resource -- whether that be a SQL
database, a CMDB solution, or something like LDAP. This concept was
adapted from Puppet (where it is called an "External Nodes
Classifier") and works more or less exactly the same way.
Jinja2
Jinja2 is the preferred templating language of Ansible's template
module. It is a very simple Python template language that is
generally readable and easy to write.
JSON
Ansible uses JSON for return data from remote modules. This allows
modules to be written in any language, not just Python.
Keyword
The main expressions that make up Ansible, which apply to playbook objects
(Play, Block, Role and Task). For example 'vars:' is a keyword that lets
you define variables in the scope of the playbook object it is applied to.
Lazy Evaluation
In general, Ansible evaluates any variables in
:term:`playbook <playbooks>` content at the last possible second,
which means that if you define a data structure that data structure
itself can define variable values within it, and everything "just
works" as you would expect. This also means variable strings can
include other variables inside of those strings.
Library
A collection of modules made available to :command:`/usr/bin/ansible`
or an Ansible :term:`playbook <playbooks>`.
Limit Groups
By passing ``--limit somegroup`` to :command:`ansible` or
:command:`ansible-playbook`, the commands can be limited to a subset
of :term:`hosts <Host>`. For instance, this can be used to run
a :term:`playbook <playbooks>` that normally targets an entire set of
servers to one particular server.
Local Action
This keyword is an alias for ``delegate_to: localhost``.
Used when you want to redirect an action from the remote to
execute on the controller itself.
Local Connection
By using ``connection: local`` in a :term:`playbook <playbooks>`, or
passing ``-c local`` to :command:`/usr/bin/ansible`, this indicates
that we are executing a local fork instead of executing on the remote machine.
You probably want ``local_action`` or ``delegate_to: localhost`` instead
as this ONLY changes the connection and no other context for execution.
Lookup Plugin
A lookup plugin is a way to get data into Ansible from the outside world.
Lookup plugins are an extension of Jinja2 and can be accessed in templates, for example,
``{{ lookup('file','/path/to/file') }}``.
This is how such things as ``with_items`` are implemented.
There are also lookup plugins like ``file`` which loads data from
a file and ones for querying environment variables, DNS text records,
or key value stores.
Loops
Generally, Ansible is not a programming language. It prefers to be
more declarative, though various constructs like ``loop`` allow
a particular task to be repeated for multiple items in a list.
Certain modules, like :ref:`yum <yum_module>` and :ref:`apt <apt_module>`, actually take
lists directly, and can install all packages given in those lists
within a single transaction, dramatically speeding up total time to
configuration, so they can be used without loops.
Modules
Modules are the units of work that Ansible ships out to remote
machines. Modules are kicked off by either
:command:`/usr/bin/ansible` or :command:`/usr/bin/ansible-playbook`
(where multiple tasks use lots of different modules in conjunction).
Modules can be implemented in any language, including Perl, Bash, or
Ruby -- but can take advantage of some useful communal library code if written
in Python. Modules just have to return :term:`JSON`. Once modules are
executed on remote machines, they are removed, so no long running
daemons are used. Ansible refers to the collection of available
modules as a :term:`library`.
Multi-Tier
The concept that IT systems are not managed one system at a time, but
by interactions between multiple systems and groups of systems in
well defined orders. For instance, a web server may need to be
updated before a database server and pieces on the web server may
need to be updated after *THAT* database server and various load
balancers and monitoring servers may need to be contacted. Ansible
models entire IT topologies and workflows rather than looking at
configuration from a "one system at a time" perspective.
Namespace
The first part of a fully qualified collection name, the namespace usually reflects a functional content category. Example: in ``cisco.ios.ios_config``, ``cisco`` is the namespace. Namespaces are reserved and distributed by Red Hat at Red Hat's discretion. Many, but not all, namespaces will correspond with vendor names. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for namespace requirements.
Notify
The act of a :term:`task <tasks>` registering a change event and
informing a :term:`handler <handlers>` task that another
:term:`action` needs to be run at the end of the :term:`play <plays>`. If
a handler is notified by multiple tasks, it will still be run only
once. Handlers are run in the order they are listed, not in the order
that they are notified.
Orchestration
Many software automation systems use this word to mean different
things. Ansible uses it as a conductor would conduct an orchestra.
A datacenter or cloud architecture is full of many systems, playing
many parts -- web servers, database servers, maybe load balancers,
monitoring systems, continuous integration systems, and so on. In
performing any process, it is necessary to touch systems in particular
orders, often to simulate rolling updates or to deploy software
correctly. Some system may perform some steps, then others, then
previous systems already processed may need to perform more steps.
Along the way, emails may need to be sent or web services contacted.
Ansible orchestration is all about modeling that kind of process.
paramiko
By default, Ansible manages machines over SSH. The library that
Ansible uses by default to do this is a Python-powered library called
paramiko. The paramiko library is generally fast and easy to manage,
though users who want to use Kerberos or Jump Hosts may wish to switch
to a native SSH binary such as OpenSSH by specifying the connection
type in their :term:`playbooks`, or using the ``-c ssh`` flag.
Playbooks
Playbooks are the language by which Ansible orchestrates, configures,
administers, or deploys systems. They are called playbooks partially
because it's a sports analogy, and it's supposed to be fun using them.
They aren't workbooks :)
Plays
A :term:`playbook <playbooks>` is a list of plays. A play is
minimally a mapping between a set of :term:`hosts <Host>` selected by a host
specifier (usually chosen by :term:`groups <Group>` but sometimes by
hostname :term:`globs <Globbing>`) and the :term:`tasks` which run on those
hosts to define the role that those systems will perform. There can be
one or many plays in a playbook.
Pull Mode
By default, Ansible runs in :term:`push mode`, which allows it very
fine-grained control over when it talks to each system. Pull mode is
provided for when you would rather have nodes check in every N minutes
on a particular schedule. It uses a program called
:command:`ansible-pull` and can also be set up (or reconfigured) using
a push-mode :term:`playbook <playbooks>`. Most Ansible users use push
mode, but pull mode is included for variety and the sake of having
choices.
:command:`ansible-pull` works by checking configuration orders out of
git on a crontab and then managing the machine locally, using the
:term:`local connection` plugin.
Push Mode
Push mode is the default mode of Ansible. In fact, it's not really
a mode at all -- it's just how Ansible works when you aren't thinking
about it. Push mode allows Ansible to be fine-grained and conduct
nodes through complex orchestration processes without waiting for them
to check in.
Register Variable
The result of running any :term:`task <tasks>` in Ansible can be
stored in a variable for use in a template or a conditional statement.
The keyword used to define the variable is called ``register``, taking
its name from the idea of registers in assembly programming (though
Ansible will never feel like assembly programming). There are an
infinite number of variable names you can use for registration.
Resource Model
Ansible modules work in terms of resources. For instance, the
:ref:`file module <file_module>` will select a particular file and ensure
that the attributes of that resource match a particular model. As an
example, we might wish to change the owner of :file:`/etc/motd` to
``root`` if it is not already set to ``root``, or set its mode to
``0644`` if it is not already set to ``0644``. The resource models
are :term:`idempotent <idempotency>` meaning change commands are not
run unless needed, and Ansible will bring the system back to a desired
state regardless of the actual state -- rather than you having to tell
it how to get to the state.
Roles
Roles are units of organization in Ansible. Assigning a role to
a group of :term:`hosts <Host>` (or a set of :term:`groups <group>`,
or :term:`host patterns <Globbing>`, and so on) implies that they should
implement a specific behavior. A role may include applying certain
variable values, certain :term:`tasks`, and certain :term:`handlers`
-- or just one or more of these things. Because of the file structure
associated with a role, roles become redistributable units that allow
you to share behavior among :term:`playbooks` -- or even with other users.
Rolling Update
The act of addressing a number of nodes in a group N at a time to
avoid updating them all at once and bringing the system offline. For
instance, in a web topology of 500 nodes handling very large volume,
it may be reasonable to update 10 or 20 machines at a time, moving on
to the next 10 or 20 when done. The ``serial:`` keyword in an Ansible
:term:`playbook <playbooks>` controls the size of the rolling update pool. The
default is to address the batch size all at once, so this is something
that you must opt in to. OS configuration (such as making sure config
files are correct) does not typically have to use the rolling update
model, but can do so if desired.
Serial
.. seealso::
:term:`Rolling Update`
Sudo
Ansible does not require root logins, and since it's daemonless,
definitely does not require root level daemons (which can be
a security concern in sensitive environments). Ansible can log in and
perform many operations wrapped in a sudo command, and can work with
both password-less and password-based sudo. Some operations that
don't normally work with sudo (like scp file transfer) can be achieved
with Ansible's :ref:`copy <copy_module>`, :ref:`template <template_module>`, and
:ref:`fetch <fetch_module>` modules while running in sudo mode.
SSH (Native)
Native OpenSSH as an Ansible transport is specified with ``-c ssh``
(or a config file, or a keyword in the :term:`playbook <playbooks>`)
and can be useful if wanting to login via Kerberized SSH or using SSH
jump hosts, and so on. In 1.2.1, ``ssh`` will be used by default if the
OpenSSH binary on the control machine is sufficiently new.
Previously, Ansible selected ``paramiko`` as a default. Using
a client that supports ``ControlMaster`` and ``ControlPersist`` is
recommended for maximum performance -- if you don't have that and
don't need Kerberos, jump hosts, or other features, ``paramiko`` is
a good choice. Ansible will warn you if it doesn't detect
ControlMaster/ControlPersist capability.
Tags
Ansible allows tagging resources in a :term:`playbook <playbooks>`
with arbitrary keywords, and then running only the parts of the
playbook that correspond to those keywords. For instance, it is
possible to have an entire OS configuration, and have certain steps
labeled ``ntp``, and then run just the ``ntp`` steps to reconfigure
the time server information on a remote host.
Task
:term:`Playbooks` exist to run tasks. Tasks combine an :term:`action`
(a module and its arguments) with a name and optionally some other
keywords (like :term:`looping keywords <loops>`). :term:`Handlers`
are also tasks, but they are a special kind of task that do not run
unless they are notified by name when a task reports an underlying
change on a remote system.
Tasks
A list of :term:`Task`.
Templates
Ansible can easily transfer files to remote systems but often it is
desirable to substitute variables in other files. Variables may come
from the :term:`inventory` file, :term:`Host Vars`, :term:`Group
Vars`, or :term:`Facts`. Templates use the :term:`Jinja2` template
engine and can also include logical constructs like loops and if
statements.
Transport
Ansible uses :term:`Connection Plugins <Connection Plugin>` to define types of available
transports. These are simply how Ansible will reach out to managed
systems. Transports included are :term:`paramiko`,
:term:`ssh <SSH (Native)>` (using OpenSSH), and
:term:`local <Local Connection>`.
When
An optional conditional statement attached to a :term:`task <tasks>` that is used to
determine if the task should run or not. If the expression following
the ``when:`` keyword evaluates to false, the task will be ignored.
Vars (Variables)
As opposed to :term:`Facts`, variables are names of values (they can
be simple scalar values -- integers, booleans, strings) or complex
ones (dictionaries/hashes, lists) that can be used in templates and
:term:`playbooks`. They are declared things, not things that are
inferred from the remote system's current state or nature (which is
what Facts are).
YAML
Ansible does not want to force people to write programming language
code to automate infrastructure, so Ansible uses YAML to define
:term:`playbook <playbooks>` configuration languages and also variable
files. YAML is nice because it has a minimum of syntax and is very
clean and easy for people to skim. It is a good data format for
configuration files and humans, but also machine readable. Ansible's
usage of YAML stemmed from Michael DeHaan's first use of it inside of
Cobbler around 2006. YAML is fairly popular in the dynamic language
community and the format has libraries available for serialization in
many languages (Python, Perl, Ruby, and so on).
.. seealso::
:ref:`ansible_faq`
Frequently asked questions
:ref:`working_with_playbooks`
An introduction to playbooks
:ref:`playbooks_best_practices`
Tips and tricks for playbooks
`User Mailing List <https://groups.google.com/group/ansible-devel>`_
Have a question? Stop by the google group!
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,347 |
Clarify in docs that Community Galaxy cannot provide signatures
|
### Summary
The documentation seems to imply that the community Galaxy server can supply signatures, which it can't. Clarify that only AH and self-hosted pulp or on-prem hub servers will have signatures available:
https://github.com/ansible/ansible/pull/78326/files#diff-3147c791e9e90d93c0f1468a3d5769f74d598bf69ed68f3553544454d36b3fc5R276-R285
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/user_guide/collections_using.rst
### Ansible Version
```console
$ ansible --version
2.13
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
none
```
### OS / Environment
none
### Additional Information
Fix needs to be backported to 2.13 as well
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78347
|
https://github.com/ansible/ansible/pull/78386
|
ccf1a631585149c612250f466044f88b43518393
|
7ae1eabaa41080fc97e0155919471f57dba23825
| 2022-07-25T19:53:08Z |
python
| 2022-08-16T20:05:06Z |
docs/docsite/rst/shared_snippets/installing_collections.txt
|
By default, ``ansible-galaxy collection install`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the
:file:`ansible.cfg` file under :ref:`galaxy_server`). You do not need any
further configuration.
See :ref:`Configuring the ansible-galaxy client <galaxy_server_config>` if you are using any other Galaxy server, such as Red Hat Automation Hub.
To install a collection hosted in Galaxy:
.. code-block:: bash
ansible-galaxy collection install my_namespace.my_collection
To upgrade a collection to the latest available version from the Galaxy server you can use the ``--upgrade`` option:
.. code-block:: bash
ansible-galaxy collection install my_namespace.my_collection --upgrade
You can also directly use the tarball from your build:
.. code-block:: bash
ansible-galaxy collection install my_namespace-my_collection-1.0.0.tar.gz -p ./collections
You can build and install a collection from a local source directory. The ``ansible-galaxy`` utility builds the collection using the ``MANIFEST.json`` or ``galaxy.yml``
metadata in the directory.
.. code-block:: bash
ansible-galaxy collection install /path/to/collection -p ./collections
You can also install multiple collections in a namespace directory.
.. code-block:: text
ns/
├── collection1/
│ ├── MANIFEST.json
│ └── plugins/
└── collection2/
├── galaxy.yml
└── plugins/
.. code-block:: bash
ansible-galaxy collection install /path/to/ns -p ./collections
.. note::
The install command automatically appends the path ``ansible_collections`` to the one specified with the ``-p`` option unless the
parent directory is already in a folder called ``ansible_collections``.
When using the ``-p`` option to specify the install path, use one of the values configured in :ref:`COLLECTIONS_PATHS`, as this is
where Ansible itself will expect to find collections. If you don't specify a path, ``ansible-galaxy collection install`` installs
the collection to the first path defined in :ref:`COLLECTIONS_PATHS`, which by default is ``~/.ansible/collections``
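For example, assuming the default configuration described in the note above:
.. code-block:: bash
# With no -p option, the collection installs under the first configured path
ansible-galaxy collection install my_namespace.my_collection
# resulting layout: ~/.ansible/collections/ansible_collections/my_namespace/my_collection/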
You can also keep a collection adjacent to the current playbook, under a ``collections/ansible_collections/`` directory structure.
.. code-block:: text
./
├── play.yml
├── collections/
│ └── ansible_collections/
│ └── my_namespace/
│ └── my_collection/<collection structure lives here>
See :ref:`collection_structure` for details on the collection directory structure.
Collections signed by a distribution server can be verified during installation with GnuPG. Note that the community Galaxy server does not provide signatures; signatures are available from Red Hat Automation Hub and privately hosted Automation Hub or Pulp instances. To opt into signature verification, configure a keyring for ``ansible-galaxy`` with native GnuPG tooling and provide the file path with the ``--keyring`` CLI option or :ref:`GALAXY_GPG_KEYRING`. Signatures provided by the server will be used to verify the collection's ``MANIFEST.json``.
Use the ``--signature`` option to verify the collection's ``MANIFEST.json`` with additional signatures to those provided by the Galaxy server. Supplemental signatures should be provided as URIs.
.. code-block:: bash
ansible-galaxy collection install my_namespace.my_collection --signature https://examplehost.com/detached_signature.asc --keyring ~/.ansible/pubring.kbx
GnuPG verification only occurs for collections installed from a Galaxy server. User-provided signatures are not used to verify collections installed from git repositories, source directories, or URLs/paths to tar.gz files.
By default, verification is considered successful if a minimum of 1 signature successfully verifies the collection. The number of required signatures can be configured with ``--required-valid-signature-count`` or :ref:`GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`. All signatures can be required by setting the option to ``all``. To fail signature verification if no valid signatures are found, prepend the value with ``+``, such as ``+all`` or ``+1``.
.. code-block:: bash
export ANSIBLE_GALAXY_GPG_KEYRING=~/.ansible/pubring.kbx
export ANSIBLE_GALAXY_REQUIRED_VALID_SIGNATURE_COUNT=2
ansible-galaxy collection install my_namespace.my_collection --signature https://examplehost.com/detached_signature.asc --signature file:///path/to/local/detached_signature.asc
Certain GnuPG errors can be ignored with ``--ignore-signature-status-code`` or :ref:`GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES`. :ref:`GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES` should be a list, and ``--ignore-signature-status-code`` can be provided multiple times to ignore multiple additional error status codes.
This example requires any signatures provided by the Galaxy server to verify the collection except if they fail due to NO_PUBKEY:
.. code-block:: bash
export ANSIBLE_GALAXY_GPG_KEYRING=~/.ansible/pubring.kbx
export ANSIBLE_GALAXY_REQUIRED_VALID_SIGNATURE_COUNT=all
ansible-galaxy collection install my_namespace.my_collection --ignore-signature-status-code NO_PUBKEY
If verification fails for the example above, only errors other than NO_PUBKEY will be displayed.
If verification is unsuccessful, the collection will not be installed. GnuPG signature verification can be disabled with ``--disable-gpg-verify`` or by configuring :ref:`GALAXY_DISABLE_GPG_VERIFY`.
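For example, to install without signature verification:
.. code-block:: bash
ansible-galaxy collection install my_namespace.my_collection --disable-gpg-verify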
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,347 |
Clarify in docs that Community Galaxy cannot provide signatures
|
### Summary
The documentation seems to imply that the community Galaxy server can supply signatures, which it can't. Clarify that only AH and self-hosted pulp or on-prem hub servers will have signatures available:
https://github.com/ansible/ansible/pull/78326/files#diff-3147c791e9e90d93c0f1468a3d5769f74d598bf69ed68f3553544454d36b3fc5R276-R285
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/user_guide/collections_using.rst
### Ansible Version
```console
$ ansible --version
2.13
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
none
```
### OS / Environment
none
### Additional Information
Fix needs to be backported to 2.13 as well
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78347
|
https://github.com/ansible/ansible/pull/78386
|
ccf1a631585149c612250f466044f88b43518393
|
7ae1eabaa41080fc97e0155919471f57dba23825
| 2022-07-25T19:53:08Z |
python
| 2022-08-16T20:05:06Z |
docs/docsite/rst/user_guide/collections_using.rst
|
.. _collections:
*****************
Using collections
*****************
Collections are a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. As modules move from the core Ansible repository into collections, the module documentation will move to the :ref:`collections pages <list_of_collections>`.
You can install and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_.
* For details on how to *develop* collections see :ref:`developing_collections`.
* For the current development status of Collections and FAQ see `Ansible Collections Community Guide <https://github.com/ansible-collections/overview/blob/main/README.rst>`_.
.. contents::
:local:
:depth: 2
.. _collections_installing:
Installing collections
======================
.. note::
If you install a collection manually as described in this paragraph, the collection will not be upgraded automatically when you upgrade the ``ansible`` package or ``ansible-core``.
Installing collections with ``ansible-galaxy``
----------------------------------------------
.. include:: ../shared_snippets/installing_collections.txt
.. _collections_older_version:
Installing an older version of a collection
-------------------------------------------
.. include:: ../shared_snippets/installing_older_collection.txt
.. _collection_requirements_file:
Install multiple collections with a requirements file
-----------------------------------------------------
.. include:: ../shared_snippets/installing_multiple_collections.txt
.. _collection_offline_download:
Downloading a collection for offline use
-----------------------------------------
.. include:: ../shared_snippets/download_tarball_collections.txt
Installing a collection from source files
-----------------------------------------
.. include:: ../shared_snippets/installing_collections_file.rst
Installing a collection from a git repository
---------------------------------------------
.. include:: ../shared_snippets/installing_collections_git_repo.txt
.. _galaxy_server_config:
Configuring the ``ansible-galaxy`` client
------------------------------------------
.. include:: ../shared_snippets/galaxy_server_list.txt
.. _collections_downloading:
Downloading collections
=======================
To download a collection and its dependencies for an offline install, run ``ansible-galaxy collection download``. This
downloads the collections specified and their dependencies to the specified folder and creates a ``requirements.yml``
file which can be used to install those collections on a host without access to a Galaxy server. All the collections
are downloaded by default to the ``./collections`` folder.
Just like the ``install`` command, the collections are sourced based on the
:ref:`configured galaxy server config <galaxy_server_config>`. Even if a collection to download was specified by a URL
or path to a tarball, the collection will be redownloaded from the configured Galaxy server.
Collections can be specified as one or multiple collections or with a ``requirements.yml`` file just like
``ansible-galaxy collection install``.
To download a single collection and its dependencies:
.. code-block:: bash
ansible-galaxy collection download my_namespace.my_collection
To download a single collection at a specific version:
.. code-block:: bash
ansible-galaxy collection download my_namespace.my_collection:1.0.0
To download multiple collections either specify multiple collections as command line arguments as shown above or use a
requirements file in the format documented with :ref:`collection_requirements_file`.
.. code-block:: bash
ansible-galaxy collection download -r requirements.yml
You can also download a source collection directory. The collection is built with the mandatory ``galaxy.yml`` file.
.. code-block:: bash
ansible-galaxy collection download /path/to/collection
ansible-galaxy collection download git+file:///path/to/collection/.git
You can download multiple source collections from a single namespace by providing the path to the namespace.
.. code-block:: text
ns/
├── collection1/
│ ├── galaxy.yml
│ └── plugins/
└── collection2/
├── galaxy.yml
└── plugins/
.. code-block:: bash
ansible-galaxy collection download /path/to/ns
All the collections are downloaded by default to the ``./collections`` folder but you can use ``-p`` or
``--download-path`` to specify another path:
.. code-block:: bash
ansible-galaxy collection download my_namespace.my_collection -p ~/offline-collections
Once you have downloaded the collections, the folder contains the collections specified, their dependencies, and a
``requirements.yml`` file. You can use this folder as is with ``ansible-galaxy collection install`` to install the
collections on a host without access to a Galaxy server.
.. code-block:: bash
# This must be run from the folder that contains the offline collections and requirements.yml file downloaded
# by the internet-connected host
cd ~/offline-collections
ansible-galaxy collection install -r requirements.yml
.. _collections_listing:
Listing collections
===================
To list installed collections, run ``ansible-galaxy collection list``. This shows all of the installed collections found in the configured collections search paths. It also shows collections under development that contain a ``galaxy.yml`` file instead of a ``MANIFEST.json``. The path where each collection is located is displayed, along with version information. If no version information is available, a ``*`` is displayed for the version number.
.. code-block:: shell
# /home/astark/.ansible/collections/ansible_collections
Collection Version
-------------------------- -------
cisco.aci 0.0.5
cisco.mso 0.0.4
sandwiches.ham *
splunk.es 0.0.5
# /usr/share/ansible/collections/ansible_collections
Collection Version
----------------- -------
fortinet.fortios 1.0.6
pureport.pureport 0.0.8
sensu.sensu_go 1.3.0
Run with ``-vvv`` to display more detailed information.
To list a specific collection, pass a valid fully qualified collection name (FQCN) to the command ``ansible-galaxy collection list``. All instances of the collection will be listed.
.. code-block:: shell
> ansible-galaxy collection list fortinet.fortios
# /home/astark/.ansible/collections/ansible_collections
Collection Version
---------------- -------
fortinet.fortios 1.0.1
# /usr/share/ansible/collections/ansible_collections
Collection Version
---------------- -------
fortinet.fortios 1.0.6
To search other paths for collections, use the ``-p`` option. Specify multiple search paths by separating them with a ``:``. The list of paths specified on the command line will be added to the beginning of the configured collections search paths.
.. code-block:: shell
> ansible-galaxy collection list -p '/opt/ansible/collections:/etc/ansible/collections'
# /opt/ansible/collections/ansible_collections
Collection Version
--------------- -------
sandwiches.club 1.7.2
# /etc/ansible/collections/ansible_collections
Collection Version
-------------- -------
sandwiches.pbj 1.2.0
# /home/astark/.ansible/collections/ansible_collections
Collection Version
-------------------------- -------
cisco.aci 0.0.5
cisco.mso 0.0.4
fortinet.fortios 1.0.1
sandwiches.ham *
splunk.es 0.0.5
# /usr/share/ansible/collections/ansible_collections
Collection Version
----------------- -------
fortinet.fortios 1.0.6
pureport.pureport 0.0.8
sensu.sensu_go 1.3.0
.. _using_collections:
Verifying collections
=====================
Verifying collections with ``ansible-galaxy``
---------------------------------------------
Once installed, you can verify that the content of the installed collection matches the content of the collection on the server. This feature expects that the collection is installed in one of the configured collection paths and that the collection exists on one of the configured galaxy servers.
.. code-block:: bash
ansible-galaxy collection verify my_namespace.my_collection
The output of the ``ansible-galaxy collection verify`` command is quiet if it is successful. If a collection has been modified, the altered files are listed under the collection name.
.. code-block:: bash
ansible-galaxy collection verify my_namespace.my_collection
Collection my_namespace.my_collection contains modified content in the following files:
my_namespace.my_collection
plugins/inventory/my_inventory.py
plugins/modules/my_module.py
You can use the ``-vvv`` flag to display additional information, such as the version and path of the installed collection, the URL of the remote collection used for validation, and successful verification output.
.. code-block:: bash
ansible-galaxy collection verify my_namespace.my_collection -vvv
...
Verifying 'my_namespace.my_collection:1.0.0'.
Installed collection found at '/path/to/ansible_collections/my_namespace/my_collection/'
Remote collection found at 'https://galaxy.ansible.com/download/my_namespace-my_collection-1.0.0.tar.gz'
Successfully verified that checksums for 'my_namespace.my_collection:1.0.0' match the remote collection
If you have a pre-release or non-latest version of a collection installed you should include the specific version to verify. If the version is omitted, the installed collection is verified against the latest version available on the server.
.. code-block:: bash
ansible-galaxy collection verify my_namespace.my_collection:1.0.0
In addition to the ``namespace.collection_name:version`` format, you can provide the collections to verify in a ``requirements.yml`` file. Dependencies listed in ``requirements.yml`` are not included in the verify process and should be verified separately.
.. code-block:: bash
ansible-galaxy collection verify -r requirements.yml
Verifying against ``tar.gz`` files is not supported. If your ``requirements.yml`` contains paths to tar files or URLs for installation, you can use the ``--ignore-errors`` flag to ensure that all collections using the ``namespace.name`` format in the file are processed.
Signature verification
----------------------
If a collection has been signed by a distribution server, the server provides ASCII armored, detached signatures to verify the authenticity of the ``MANIFEST.json`` before using it to verify the collection's contents. Note that not all servers can supply signatures: Red Hat Automation Hub and privately hosted Automation Hub or Pulp instances can, while the community Galaxy server cannot. You must opt into signature verification by :ref:`configuring a keyring <galaxy_gpg_keyring>` for ``ansible-galaxy``, or by providing the path with the ``--keyring`` option.
To import a public key into a keyring for use with ``ansible-galaxy``, run the following command.
.. code-block:: bash
gpg --import --no-default-keyring --keyring ~/.ansible/pubring.kbx my-public-key.asc
In addition to any signatures provided by the Galaxy server, signature sources can also be provided in the requirements file and on the command line. Signature sources should be URIs.
You can manually generate detached signatures for a collection using the ``gpg`` CLI as shown below. These steps assume you have already generated a GPG private key, but do not cover that process.
.. code-block:: bash
ansible-galaxy collection build
tar -Oxzf namespace-name-1.0.0.tar.gz MANIFEST.json | gpg --output namespace-name-1.0.0.asc --detach-sign --armor --local-user [email protected] -
Use the ``--signature`` option to verify collection name(s) provided on the CLI with an additional signature. This option can be used multiple times to provide multiple signatures.
.. code-block:: bash
ansible-galaxy collection verify my_namespace.my_collection --signature https://examplehost.com/detached_signature.asc --signature file:///path/to/local/detached_signature.asc --keyring ~/.ansible/pubring.kbx
Collections in a requirements file should list any additional signature sources following the collection's "signatures" key.
.. code-block:: yaml

   # requirements.yml
   collections:
     - name: ns.coll
       version: 1.0.0
       signatures:
         - https://examplehost.com/detached_signature.asc
         - file:///path/to/local/detached_signature.asc
.. code-block:: bash
ansible-galaxy collection verify -r requirements.yml --keyring ~/.ansible/pubring.kbx
When a collection is installed from a Galaxy server, the signatures provided by the server to verify the collection's authenticity are saved alongside the installed collections. This data is used to verify the internal consistency of the collection without querying the Galaxy server again when the ``--offline`` option is provided.
.. code-block:: bash
ansible-galaxy collection verify my_namespace.my_collection --offline --keyring ~/.ansible/pubring.kbx
.. _collections_using_playbook:
Using collections in a Playbook
===============================
Once installed, you can reference collection content by its fully qualified collection name (FQCN):
.. code-block:: yaml

   - hosts: all
     tasks:
       - my_namespace.my_collection.mymodule:
           option1: value
This works for roles or any type of plugin distributed within the collection:
.. code-block:: yaml

   - hosts: all
     tasks:
       - import_role:
           name: my_namespace.my_collection.role1

       - my_namespace.my_collection.mymodule:
           option1: value

       - debug:
           msg: '{{ lookup("my_namespace.my_collection.lookup1", "param1") | my_namespace.my_collection.filter1 }}'
Simplifying module names with the ``collections`` keyword
=========================================================
The ``collections`` keyword lets you define a list of collections that your role or playbook should search for unqualified module and action names. So you can use the ``collections`` keyword, then simply refer to modules and action plugins by their short-form names throughout that role or playbook.
.. warning::
If your playbook uses both the ``collections`` keyword and one or more roles, the roles do not inherit the collections set by the playbook. This is one of the reasons we recommend you always use FQCN. See below for details on roles.
Using ``collections`` in roles
------------------------------
Within a role, you can control which collections Ansible searches for the tasks inside the role using the ``collections`` keyword in the role's ``meta/main.yml``. Ansible will use the collections list defined inside the role even if the playbook that calls the role defines different collections in a separate ``collections`` keyword entry. Roles defined inside a collection always implicitly search their own collection first, so you don't need to use the ``collections`` keyword to access modules, actions, or other roles contained in the same collection.
.. code-block:: yaml

   # myrole/meta/main.yml
   collections:
     - my_namespace.first_collection
     - my_namespace.second_collection
     - other_namespace.other_collection
Using ``collections`` in playbooks
----------------------------------
In a playbook, you can control the collections Ansible searches for modules and action plugins to execute. However, any roles you call in your playbook define their own collections search order; they do not inherit the calling playbook's settings. This is true even if the role does not define its own ``collections`` keyword.
.. code-block:: yaml

   - hosts: all
     collections:
       - my_namespace.my_collection
     tasks:
       - import_role:
           name: role1

       - mymodule:
           option1: value

       - debug:
           msg: '{{ lookup("my_namespace.my_collection.lookup1", "param1") | my_namespace.my_collection.filter1 }}'
The ``collections`` keyword merely creates an ordered 'search path' for non-namespaced plugin and role references. It does not install content or otherwise change Ansible's behavior around the loading of plugins or roles. Note that an FQCN is still required for non-action or module plugins (for example, lookups, filters, tests).
When using the ``collections`` keyword, it is not necessary to add ``ansible.builtin`` to the search list. When omitted, the following content is available by default:
1. Standard ansible modules and plugins available through ``ansible-base``/``ansible-core``
2. Support for older 3rd party plugin paths
In general, it is preferable to use a module or plugin's FQCN over the ``collections`` keyword and the short name, for all content in ``ansible-core``.
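For example, even when a ``collections`` search list is defined, spelling out the FQCN keeps a reference unambiguous. A minimal sketch:

.. code-block:: yaml

   - hosts: all
     collections:
       - my_namespace.my_collection
     tasks:
       # Resolved directly, without consulting the collections search list
       - ansible.builtin.debug:
           msg: FQCNs avoid any ambiguity about which plugin runs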
Using a playbook from a collection
==================================
.. versionadded:: 2.11
You can also distribute playbooks in your collection and invoke them using the same semantics you use for plugins:
.. code-block:: shell
ansible-playbook my_namespace.my_collection.playbook1 -i ./myinventory
From inside a playbook:
.. code-block:: yaml
- import_playbook: my_namespace.my_collection.playbookX
A few recommendations when creating such playbooks: ``hosts:`` should be generic or at least have a variable input.
.. code-block:: yaml
- hosts: all # Use --limit or customized inventory to restrict hosts targeted
- hosts: localhost # For things you want to restrict to the controller
- hosts: '{{target|default("webservers")}}' # Assumes inventory provides a 'webservers' group, but can also use ``-e 'target=host1,host2'``
This will have an implied entry in the ``collections:`` keyword of ``my_namespace.my_collection`` just as with roles.
.. note::
Playbook names, like other collection resources, have a restricted set of valid characters.
Names can contain only lowercase alphanumeric characters, plus the underscore character ``_``, and must start with an alphabetic character. The dash ``-`` character is not valid for playbook names in collections.
Playbooks whose names contain invalid characters are not addressable: this is a limitation of the Python importer that is used to load collection resources.
.. seealso::
:ref:`developing_collections`
Develop or modify a collection.
:ref:`collections_galaxy_meta`
Understand the collections metadata structure.
`Mailing List <https://groups.google.com/group/ansible-devel>`_
The development mailing list
:ref:`communication_irc`
How to join Ansible chat channels
`Automation Hub <https://access.redhat.com/documentation/en-us/red_hat_ansible_automation_platform/>`_
Learn how to use collections with Red Hat Automation Hub
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,030 |
On systems with slow disks, Ansible 2.10 runs generally much slower than 2.9
|
##### SUMMARY
At first, I thought this may just be a problem that was caused by 2.10 using collections and shipping with dozens of collections out of the box when you `pip install` it now.
But after exploring further, I found that basic `ansible` commands like `ansible --version` are 3x slower than Ansible 2.9, even if I'm just installing and using `ansible-base`, with _no collections_ installed.
Note that these tests were done on a Raspberry Pi 4 (after noticing it took about 2 minutes to run `ansible --version` on my Pi Zero after upgrading to 2.10). I haven't yet tested on my Mac, where the system's blazing-fast NVM drive and i9 CPU will make the absolute numbers much better—but I would like to see if the _relative_ performance difference is the same.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ansible (and other `ansible-*` commands)
##### ANSIBLE VERSION
```paste below
$ time ansible --version
ansible 2.10.1
config file = /home/pi/pi-webcam/ansible.cfg
configured module search path = ['/home/pi/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.7/dist-packages/ansible
executable location = /usr/local/bin/ansible
python version = 3.7.3 (default, Jul 25 2020, 13:03:44) [GCC 8.3.0]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
Linux (Debian 10, Raspberry Pi OS)
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
pip3 install -y ansible
time ansible --version # run 4 times, discard 1st result
pip3 uninstall -y ansible ansible-base
pip3 install -y ansible-base
time ansible --version # run 4 times, discard 1st result
pip3 uninstall -y ansible ansible-base
pip3 install -y ansible==2.9.*
time ansible --version # run 4 times, discard 1st result
pip3 uninstall -y ansible ansible-base
```
##### EXPECTED RESULTS
2.10.1 version should be at least _similar_, maybe a little slower, but not twice or three times slower.
##### ACTUAL RESULTS
Benchmarking results:
| Ansible version | Pip package size | Time | Delta vs 2.9 |
| --- | --- | --- | --- |
| 2.9.13 | 16.2MB | 2.09s | - |
| 2.10.0 (ansible) | 43.1MB | 6.09s | 3x slower |
| 2.10.1 (ansible-base) | 1.9MB | 6.33s | 3x slower |
|
https://github.com/ansible/ansible/issues/72030
|
https://github.com/ansible/ansible/pull/78483
|
7ae1eabaa41080fc97e0155919471f57dba23825
|
33ee5d4c1b8ea5357e29749a71f55d8f03642b9e
| 2020-09-30T17:32:29Z |
python
| 2022-08-16T20:06:25Z |
docs/docsite/rst/reference_appendices/faq.rst
|
.. _ansible_faq:
Frequently Asked Questions
==========================
Here are some commonly asked questions and their answers.
.. _collections_transition:
Where did all the modules go?
+++++++++++++++++++++++++++++
In July, 2019, we announced that collections would be the `future of Ansible content delivery <https://www.ansible.com/blog/the-future-of-ansible-content-delivery>`_. A collection is a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. In Ansible 2.9 we added support for collections. In Ansible 2.10 we `extracted most modules from the main ansible/ansible repository <https://access.redhat.com/solutions/5295121>`_ and placed them in :ref:`collections <list_of_collections>`. Collections may be maintained by the Ansible team, by the Ansible community, or by Ansible partners. The `ansible/ansible repository <https://github.com/ansible/ansible>`_ now contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-core`` (it was briefly called ``ansible-base`` for version 2.10).
* To learn more about using collections, see :ref:`collections`.
* To learn more about developing collections, see :ref:`developing_collections`.
* To learn more about contributing to existing collections, see the individual collection repository for guidelines, or see :ref:`contributing_maintained_collections` to contribute to one of the Ansible-maintained collections.
.. _find_my_module:
Where did this specific module go?
++++++++++++++++++++++++++++++++++
If you are searching for a specific module, you can check the `runtime.yml <https://github.com/ansible/ansible/blob/devel/lib/ansible/config/ansible_builtin_runtime.yml>`_ file, which lists the first destination for each module that we extracted from the main ansible/ansible repository. Some modules have moved again since then. You can also search on `Ansible Galaxy <https://galaxy.ansible.com/>`_ or ask on one of our :ref:`chat channels <communication_irc>`.
.. _set_environment:
How can I set the PATH or any other environment variable for a task or entire play?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Setting environment variables can be done with the ``environment`` keyword. It can be used at the task level or at other levels in the play.
.. code-block:: yaml

   shell:
     cmd: date
   environment:
     LANG: fr_FR.UTF-8
.. code-block:: yaml

   - hosts: servers
     environment:
       PATH: "{{ ansible_env.PATH }}:/thingy/bin"
       SOME: value
.. note:: Starting in version 2.0.1, the setup task from ``gather_facts`` also inherits the environment directive from the play. You might need to use the ``|default`` filter to avoid errors if setting this at the play level.
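For example, a sketch that guards the play-level ``PATH`` with ``|default`` so the implicit fact-gathering task does not fail before ``ansible_env`` is populated (the fallback path is illustrative):

.. code-block:: yaml

   - hosts: servers
     environment:
       PATH: "{{ ansible_env.PATH | default('/usr/local/bin:/usr/bin:/bin') }}:/thingy/bin"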
.. _faq_setting_users_and_ports:
How do I handle different machines needing different user accounts or ports to log in with?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Setting inventory variables in the inventory file is the easiest way.
For instance, suppose these hosts have different usernames and ports:
.. code-block:: ini
[webservers]
asdf.example.com ansible_port=5000 ansible_user=alice
jkl.example.com ansible_port=5001 ansible_user=bob
You can also dictate the connection type to be used, if you want:
.. code-block:: ini
[testcluster]
localhost ansible_connection=local
/path/to/chroot1 ansible_connection=chroot
foo.example.com ansible_connection=paramiko
You may also wish to keep these in group variables instead, or file them in a group_vars/<groupname> file.
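For example, a setting shared by every host in a group can live in a hypothetical ``group_vars/webservers.yml`` file instead of being repeated per host:

.. code-block:: yaml

   # group_vars/webservers.yml - illustrative shared values
   ansible_user: alice
   ansible_port: 5000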
See the rest of the documentation for more information about how to organize variables.
.. _use_ssh:
How do I get ansible to reuse connections, enable Kerberized SSH, or have Ansible pay attention to my local SSH config file?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Switch your default connection type in the configuration file to ``ssh``, or use ``-c ssh`` to use
Native OpenSSH for connections instead of the python paramiko library. In Ansible 1.2.1 and later, ``ssh`` will be used
by default if OpenSSH is new enough to support ControlPersist as an option.
Paramiko is great for starting out, but the OpenSSH type offers many advanced options. You will want to run Ansible
from a machine new enough to support ControlPersist, if you are using this connection type. You can still manage
older clients. If you are using RHEL 6, CentOS 6, SLES 10 or SLES 11, the version of OpenSSH is still a bit old, so
consider managing from a Fedora or openSUSE client even though you are managing older nodes, or just use paramiko.
We keep paramiko as the default because, when first installing Ansible on these enterprise operating systems, it offers a better experience for new users.
.. _use_ssh_jump_hosts:
How do I configure a jump host to access servers that I have no direct access to?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You can set a ``ProxyCommand`` in the
``ansible_ssh_common_args`` inventory variable. Any arguments specified in
this variable are added to the sftp/scp/ssh command line when connecting
to the relevant host(s). Consider the following inventory group:
.. code-block:: ini
[gatewayed]
foo ansible_host=192.0.2.1
bar ansible_host=192.0.2.2
You can create `group_vars/gatewayed.yml` with the following contents::
ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q [email protected]"'
Ansible will append these arguments to the command line when trying to
connect to any hosts in the group ``gatewayed``. (These arguments are used
in addition to any ``ssh_args`` from ``ansible.cfg``, so you do not need to
repeat global ``ControlPersist`` settings in ``ansible_ssh_common_args``.)
Note that ``ssh -W`` is available only with OpenSSH 5.4 or later. With
older versions, it's necessary to execute ``nc %h:%p`` or some equivalent
command on the bastion host.
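A sketch of that fallback, assuming netcat is installed on the bastion (exact netcat invocation varies by implementation):

.. code-block:: yaml

   ansible_ssh_common_args: '-o ProxyCommand="ssh -q [email protected] nc %h %p"'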
With earlier versions of Ansible, it was necessary to configure a
suitable ``ProxyCommand`` for one or more hosts in ``~/.ssh/config``,
or globally by setting ``ssh_args`` in ``ansible.cfg``.
.. _ssh_serveraliveinterval:
How do I get Ansible to notice a dead target in a timely manner?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You can add ``-o ServerAliveInterval=NumberOfSeconds`` in ``ssh_args`` from ``ansible.cfg``. Without this option,
SSH and therefore Ansible will wait until the TCP connection times out. Another solution is to add ``ServerAliveInterval``
into your global SSH configuration. A good value for ``ServerAliveInterval`` is up to you to decide; keep in mind that
``ServerAliveCountMax=3`` is the SSH default so any value you set will be tripled before terminating the SSH session.
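A minimal ``ansible.cfg`` sketch that keeps the usual control-persist options and adds a 30-second keepalive:

.. code-block:: ini

   [ssh_connection]
   ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o ServerAliveInterval=30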
.. _cloud_provider_performance:
How do I speed up run of ansible for servers from cloud providers (EC2, openstack,.. )?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Don't try to manage a fleet of machines of a cloud provider from your laptop.
Rather connect to a management node inside this cloud provider first and run Ansible from there.
.. _python_interpreters:
How do I handle not having a Python interpreter at /usr/bin/python on a remote machine?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
While you can write Ansible modules in any language, most Ansible modules are written in Python,
including the ones central to letting Ansible work.
By default, Ansible assumes it can find a :command:`/usr/bin/python` on your remote system that is
either Python2, version 2.6 or higher or Python3, 3.5 or higher.
Setting the inventory variable ``ansible_python_interpreter`` on any host will tell Ansible to
auto-replace the Python interpreter with that value instead. Thus, you can point to any Python you
want on the system if :command:`/usr/bin/python` on your system does not point to a compatible
Python interpreter.
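For example, in an INI inventory (hostname and interpreter path are illustrative):

.. code-block:: ini

   [targets]
   host1.example.com ansible_python_interpreter=/usr/local/bin/python3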
Some platforms may only have Python 3 installed by default. If it is not installed as
:command:`/usr/bin/python`, you will need to configure the path to the interpreter via
``ansible_python_interpreter``. Although most core modules will work with Python 3, there may be some
special purpose ones which do not or you may encounter a bug in an edge case. As a temporary
workaround you can install Python 2 on the managed host and configure Ansible to use that Python via
``ansible_python_interpreter``. If there's no mention in the module's documentation that the module
requires Python 2, you can also report a bug on our `bug tracker
<https://github.com/ansible/ansible/issues>`_ so that the incompatibility can be fixed in a future release.
Do not replace the shebang lines of your python modules. Ansible will do this for you automatically at deploy time.
Also, this works for ANY interpreter, for example ruby: ``ansible_ruby_interpreter``, perl: ``ansible_perl_interpreter``, and so on,
so you can use this for custom modules written in any scripting language and control the interpreter location.
Keep in mind that if you put ``env`` in your module shebang line (``#!/usr/bin/env <other>``),
this facility will be ignored so you will be at the mercy of the remote `$PATH`.
.. _installation_faqs:
How do I handle the package dependencies required by Ansible package dependencies during Ansible installation ?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
While installing Ansible, sometimes you may encounter errors such as `No package 'libffi' found` or `fatal error: Python.h: No such file or directory`
These errors are generally caused by the missing packages, which are dependencies of the packages required by Ansible.
For example, `libffi` package is dependency of `pynacl` and `paramiko` (Ansible -> paramiko -> pynacl -> libffi).
In order to solve these kinds of dependency issues, you might need to install required packages using
the OS native package managers, such as `yum`, `dnf`, or `apt`, or as mentioned in the package installation guide.
Refer to the documentation of the respective package for such dependencies and their installation methods.
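For example, on Debian-based systems the headers needed to build ``pynacl`` and ``paramiko`` might be installed like this (package names vary between distributions):

.. code-block:: shell

   sudo apt install build-essential libffi-dev python3-dev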
Common Platform Issues
++++++++++++++++++++++
What customer platforms does Red Hat support?
---------------------------------------------
A number of them! For a definitive list please see this `Knowledge Base article <https://access.redhat.com/articles/3168091>`_.
Running in a virtualenv
-----------------------
You can install Ansible into a virtualenv on the controller quite simply:
.. code-block:: shell
$ virtualenv ansible
$ source ./ansible/bin/activate
$ pip install ansible
If you want to run under Python 3 instead of Python 2 you may want to change that slightly:
.. code-block:: shell
$ virtualenv -p python3 ansible
$ source ./ansible/bin/activate
$ pip install ansible
If you need to use any libraries which are not available via pip (for instance, SELinux Python
bindings on systems such as Red Hat Enterprise Linux or Fedora that have SELinux enabled), then you
need to install them into the virtualenv. There are two methods:
* When you create the virtualenv, specify ``--system-site-packages`` to make use of any libraries
installed in the system's Python:
.. code-block:: shell
$ virtualenv ansible --system-site-packages
* Copy those files in manually from the system. For instance, for SELinux bindings you might do:
.. code-block:: shell
$ virtualenv ansible --system-site-packages
$ cp -r -v /usr/lib64/python3.*/site-packages/selinux/ ./py3-ansible/lib64/python3.*/site-packages/
$ cp -v /usr/lib64/python3.*/site-packages/*selinux*.so ./py3-ansible/lib64/python3.*/site-packages/
Running on macOS
----------------
When executing Ansible on a system with macOS as a controller machine one might encounter the following error:
.. error::
+[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug.
ERROR! A worker was found in a dead state
In general the recommended workaround is to set the following environment variable in your shell:
.. code-block:: shell
$ export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
Running on BSD
--------------
.. seealso:: :ref:`working_with_bsd`
Running on Solaris
------------------
By default, Solaris 10 and earlier run a non-POSIX shell which does not correctly expand the default
tmp directory Ansible uses ( :file:`~/.ansible/tmp`). If you see module failures on Solaris machines, this
is likely the problem. There are several workarounds:
* You can set ``remote_tmp`` to a path that will expand correctly with the shell you are using
(see the plugin documentation for :ref:`C shell<csh_shell>`, :ref:`fish shell<fish_shell>`,
and :ref:`Powershell<powershell_shell>`). For example, in the ansible config file you can set::
remote_tmp=$HOME/.ansible/tmp
In Ansible 2.5 and later, you can also set it per-host in inventory like this::
solaris1 ansible_remote_tmp=$HOME/.ansible/tmp
* You can set :ref:`ansible_shell_executable<ansible_shell_executable>` to the path to a POSIX compatible shell. For
instance, many Solaris hosts have a POSIX shell located at :file:`/usr/xpg4/bin/sh` so you can set
this in inventory like so::
solaris1 ansible_shell_executable=/usr/xpg4/bin/sh
(bash, ksh, and zsh should also be POSIX compatible if you have any of those installed).
Running on z/OS
---------------
There are a few common errors that one might run into when trying to execute Ansible on z/OS as a target.
* Version 2.7.6 of python for z/OS will not work with Ansible because it represents strings internally as EBCDIC.
To get around this limitation, download and install a later version of `python for z/OS <https://www.rocketsoftware.com/zos-open-source>`_ (2.7.13 or 3.6.1) that represents strings internally as ASCII. Version 2.7.13 is verified to work.
* When ``pipelining = False`` in `/etc/ansible/ansible.cfg` then Ansible modules are transferred in binary mode via sftp however execution of python fails with
.. error::
SyntaxError: Non-UTF-8 code starting with \'\\x83\' in file /a/user1/.ansible/tmp/ansible-tmp-1548232945.35-274513842609025/AnsiballZ_stat.py on line 1, but no encoding declared; see https://python.org/dev/peps/pep-0263/ for details
To fix it set ``pipelining = True`` in `/etc/ansible/ansible.cfg`.
* Python interpret cannot be found in default location ``/usr/bin/python`` on target host.
.. error::
/usr/bin/python: EDC5129I No such file or directory
To fix this set the path to the python installation in your inventory like so::
zos1 ansible_python_interpreter=/usr/lpp/python/python-2017-04-12-py27/python27/bin/python
* Start of python fails with ``The module libpython2.7.so was not found.``
.. error::
EE3501S The module libpython2.7.so was not found.
On z/OS, you must execute python from gnu bash. If gnu bash is installed at ``/usr/lpp/bash``, you can fix this in your inventory by specifying an ``ansible_shell_executable``::
zos1 ansible_shell_executable=/usr/lpp/bash/bin/bash
Running under fakeroot
----------------------
Some issues arise as ``fakeroot`` does not create a full nor POSIX compliant system by default.
It is known that it will not correctly expand the default tmp directory Ansible uses (:file:`~/.ansible/tmp`).
If you see module failures, this is likely the problem.
The simple workaround is to set ``remote_tmp`` to a path that will expand correctly (see documentation of the shell plugin you are using for specifics).
For example, in the ansible config file (or via environment variable) you can set::
remote_tmp=$HOME/.ansible/tmp
.. _use_roles:
What is the best way to make content reusable/redistributable?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
If you have not done so already, read all about "Roles" in the playbooks documentation. This helps you make playbook content
self-contained, and works well with things like git submodules for sharing content with others.
If some of these plugin types look strange to you, see the API documentation for more details about ways Ansible can be extended.
.. _configuration_file:
Where does the configuration file live and what can I configure in it?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
See :ref:`intro_configuration`.
.. _who_would_ever_want_to_disable_cowsay_but_ok_here_is_how:
How do I disable cowsay?
++++++++++++++++++++++++
If cowsay is installed, Ansible takes it upon itself to make your day happier when running playbooks. If you decide
that you would like to work in a professional cow-free environment, you can either uninstall cowsay, set ``nocows=1``
in ``ansible.cfg``, or set the :envvar:`ANSIBLE_NOCOWS` environment variable:
.. code-block:: shell-session
export ANSIBLE_NOCOWS=1
.. _browse_facts:
How do I see a list of all of the ansible\_ variables?
++++++++++++++++++++++++++++++++++++++++++++++++++++++
Ansible by default gathers "facts" about the machines under management, and these facts can be accessed in playbooks
and in templates. To see a list of all of the facts that are available about a machine, you can run the ``setup`` module
as an ad hoc action:
.. code-block:: shell-session
ansible -m setup hostname
This will print out a dictionary of all of the facts that are available for that particular host. You might want to pipe
the output to a pager. This does NOT include inventory variables or internal 'magic' variables. See the next question
if you need more than just 'facts'.
.. _browse_inventory_vars:
How do I see all the inventory variables defined for my host?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
By running the following command, you can see inventory variables for a host:
.. code-block:: shell-session
ansible-inventory --list --yaml
.. _browse_host_vars:
How do I see all the variables specific to my host?
+++++++++++++++++++++++++++++++++++++++++++++++++++
To see all host specific variables, which might include facts and other sources:
.. code-block:: shell-session
ansible -m debug -a "var=hostvars['hostname']" localhost
Unless you are using a fact cache, you normally need to use a play that gathers facts first, for facts included in the task above.
.. _host_loops:
How do I loop over a list of hosts in a group, inside of a template?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A pretty common pattern is to iterate over a list of hosts inside of a host group, perhaps to populate a template configuration
file with a list of servers. To do this, you can just access the ``groups`` dictionary in your template, like this:
.. code-block:: jinja
{% for host in groups['db_servers'] %}
{{ host }}
{% endfor %}
If you need to access facts about these hosts, for instance, the IP address of each hostname,
you need to make sure that the facts have been populated. For example, make sure you have a play that talks to db_servers::
    - hosts: db_servers
      tasks:
        - debug: msg="doesn't matter what you do, just that they were talked to previously."
Then you can use the facts inside your template, like this:
.. code-block:: jinja
{% for host in groups['db_servers'] %}
{{ hostvars[host]['ansible_eth0']['ipv4']['address'] }}
{% endfor %}
.. _programatic_access_to_a_variable:
How do I access a variable name programmatically?
+++++++++++++++++++++++++++++++++++++++++++++++++
An example may come up where we need to get the ipv4 address of an arbitrary interface, where the interface to be used may be supplied
via a role parameter or other input. Variable names can be built by adding strings together using "~", like so:
.. code-block:: jinja
{{ hostvars[inventory_hostname]['ansible_' ~ which_interface]['ipv4']['address'] }}
The trick about going through hostvars is necessary because it's a dictionary of the entire namespace of variables. ``inventory_hostname``
is a magic variable that indicates the current host you are looping over in the host loop.
In the example above, if your interface names have dashes, you must replace them with underscores:
.. code-block:: jinja
{{ hostvars[inventory_hostname]['ansible_' ~ which_interface | replace('-', '_') ]['ipv4']['address'] }}
Also see dynamic_variables_.
.. _access_group_variable:
How do I access a group variable?
+++++++++++++++++++++++++++++++++
Technically, you don't; Ansible does not really use groups directly. Groups are labels for host selection and a way to bulk assign variables;
they are not a first-class entity, and Ansible only cares about Hosts and Tasks.
That said, you could just access the variable by selecting a host that is part of that group, see first_host_in_a_group_ below for an example.
.. _first_host_in_a_group:
How do I access a variable of the first host in a group?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
What happens if we want the ip address of the first webserver in the webservers group? Well, we can do that too. Note that if we
are using dynamic inventory, which host is the 'first' may not be consistent, so you wouldn't want to do this unless your inventory
is static and predictable. (If you are using AWX or the :ref:`Red Hat Ansible Automation Platform <ansible_platform>`, it will use database order, so this isn't a problem even if you are using cloud
based inventory scripts).
Anyway, here's the trick:
.. code-block:: jinja
{{ hostvars[groups['webservers'][0]]['ansible_eth0']['ipv4']['address'] }}
Notice how we're pulling out the hostname of the first machine of the webservers group. If you are doing this in a template, you
could use the Jinja2 ``{% set %}`` statement to simplify this, or in a playbook, you could also use set_fact::
- set_fact: headnode={{ groups['webservers'][0] }}
- debug: msg={{ hostvars[headnode].ansible_eth0.ipv4.address }}
Notice how we interchanged the bracket syntax for dots -- that can be done anywhere.
.. _file_recursion:
How do I copy files recursively onto a target host?
+++++++++++++++++++++++++++++++++++++++++++++++++++
The ``copy`` module has a recursive parameter. However, take a look at the ``synchronize`` module if you want to do something more efficient
for a large number of files. The ``synchronize`` module wraps rsync. See the module index for info on both of these modules.
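A minimal sketch of each approach (paths are illustrative; ``synchronize`` is distributed in the ``ansible.posix`` collection):

.. code-block:: yaml

   - name: Copy a small directory tree with the built-in module
     copy:
       src: files/app/
       dest: /opt/app/

   - name: Push a large directory tree efficiently via rsync
     ansible.posix.synchronize:
       src: files/app/
       dest: /opt/app/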
.. _shell_env:
How do I access shell environment variables?
++++++++++++++++++++++++++++++++++++++++++++
**On the controller machine:** To access existing environment variables on the controller, use the ``env`` lookup plugin.
For example, to access the value of the HOME environment variable on the management machine::
    ---
    # ...
    vars:
      local_home: "{{ lookup('env','HOME') }}"
**On target machines:** Environment variables are available via facts in the ``ansible_env`` variable:
.. code-block:: jinja
{{ ansible_env.HOME }}
If you need to set environment variables for TASK execution, see :ref:`playbooks_environment`
in the :ref:`Advanced Playbooks <playbooks_special_topics>` section.
There are several ways to set environment variables on your target machines. You can use the
:ref:`template <template_module>`, :ref:`replace <replace_module>`, or :ref:`lineinfile <lineinfile_module>`
modules to introduce environment variables into files. The exact files to edit vary depending on your OS
and distribution and local configuration.
.. _user_passwords:
How do I generate encrypted passwords for the user module?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
An Ansible ad hoc command is the easiest option:
.. code-block:: shell-session
ansible all -i localhost, -m debug -a "msg={{ 'mypassword' | password_hash('sha512', 'mysecretsalt') }}"
The ``mkpasswd`` utility that is available on most Linux systems is also a great option:
.. code-block:: shell-session
mkpasswd --method=sha-512
If this utility is not installed on your system (for example, you are using macOS) then you can still easily
generate these passwords using Python. First, ensure that the `Passlib <https://foss.heptapod.net/python-libs/passlib/-/wikis/home>`_
password hashing library is installed:
.. code-block:: shell-session
pip install passlib
Once the library is ready, SHA512 password values can then be generated as follows:
.. code-block:: shell-session
python -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))"
Use the integrated :ref:`hash_filters` to generate a hashed version of a password.
You shouldn't put plaintext passwords in your playbook or host_vars; instead, use :ref:`playbooks_vault` to encrypt sensitive data.
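For example, ``ansible-vault`` can produce an encrypted value that is safe to commit alongside your variables:

.. code-block:: shell-session

   ansible-vault encrypt_string 'mypassword' --name 'user_password'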
In OpenBSD, a similar option is available in the base system called ``encrypt (1)``
.. _dot_or_array_notation:
Ansible allows dot notation and array notation for variables. Which notation should I use?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The dot notation comes from Jinja and works fine for variables without special
characters. If your variable contains dots (.), colons (:), or dashes (-), if
a key begins and ends with two underscores, or if a key uses any of the known
public attributes, it is safer to use the array notation. See :ref:`playbooks_variables`
for a list of the known public attributes.
.. code-block:: jinja
item[0]['checksum:md5']
item['section']['2.1']
item['region']['Mid-Atlantic']
It is {{ temperature['Celsius']['-3'] }} outside.
Also array notation allows for dynamic variable composition, see dynamic_variables_.
Another problem with 'dot notation' is that some keys can cause problems because they collide with attributes and methods of python dictionaries.
.. code-block:: jinja
item.update # this breaks if item is a dictionary, as 'update()' is a python method for dictionaries
item['update'] # this works
.. _argsplat_unsafe:
When is it unsafe to bulk-set task arguments from a variable?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
You can set all of a task's arguments from a dictionary-typed variable. This
technique can be useful in some dynamic execution scenarios. However, it
introduces a security risk. We do not recommend it, so Ansible issues a
warning when you do something like this::
    #...
    vars:
      usermod_args:
        name: testuser
        state: present
        update_password: always
    tasks:
      - user: '{{ usermod_args }}'
This particular example is safe. However, constructing tasks like this is
risky because the parameters and values passed to ``usermod_args`` could
be overwritten by malicious values in the ``host facts`` on a compromised
target machine. To mitigate this risk:
* set bulk variables at a level of precedence greater than ``host facts`` in the order of precedence
found in :ref:`ansible_variable_precedence` (the example above is safe because play vars take
precedence over facts)
* disable the :ref:`inject_facts_as_vars` configuration setting to prevent fact values from colliding
with variables (this will also disable the original warning)
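A sketch of the second mitigation, placed in ``ansible.cfg``:

.. code-block:: ini

   [defaults]
   inject_facts_as_vars = False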
.. _commercial_support:
Can I get training on Ansible?
++++++++++++++++++++++++++++++
Yes! See our `services page <https://www.ansible.com/products/consulting>`_ for information on our services
and training offerings. Email `[email protected] <mailto:[email protected]>`_ for further details.
We also offer free web-based training classes on a regular basis. See our
`webinar page <https://www.ansible.com/resources/webinars-training>`_ for more info on upcoming webinars.
.. _web_interface:
Is there a web interface / REST API / GUI?
++++++++++++++++++++++++++++++++++++++++++++
Yes! The open-source web interface is Ansible AWX. The supported Red Hat product that makes Ansible even more powerful and easy to use is :ref:`Red Hat Ansible Automation Platform <ansible_platform>`.
.. _keep_secret_data:
How do I keep secret data in my playbook?
+++++++++++++++++++++++++++++++++++++++++
If you would like to keep secret data in your Ansible content and still share it publicly or keep things in source control, see :ref:`playbooks_vault`.
If you have a task that you don't want to show the results or command given to it when using -v (verbose) mode, the following task or playbook attribute can be useful::
    - name: secret task
      shell: /usr/bin/do_something --value={{ secret_value }}
      no_log: True
This can be used to keep verbose output but hide sensitive information from others who would otherwise like to be able to see the output.
The ``no_log`` attribute can also apply to an entire play::
    - hosts: all
      no_log: True
Though this will make the play somewhat difficult to debug. It's recommended that this
be applied to single tasks only, once a playbook is completed. Note that the use of the
``no_log`` attribute does not prevent data from being shown when debugging Ansible itself via
the :envvar:`ANSIBLE_DEBUG` environment variable.
.. _when_to_use_brackets:
.. _dynamic_variables:
.. _interpolate_variables:
When should I use {{ }}? Also, how to interpolate variables or dynamic variable names
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A steadfast rule is 'always use ``{{ }}`` except when ``when:``'.
Conditionals are always run through Jinja2 to resolve the expression,
so ``when:``, ``failed_when:`` and ``changed_when:`` are always templated and you should avoid adding ``{{ }}``.
In most other cases you should always use the brackets, even if previously you could use variables without
specifying (like ``loop`` or ``with_`` clauses), as this made it hard to distinguish between an undefined variable and a string.
Another rule is 'moustaches don't stack'. We often see this:
.. code-block:: jinja
{{ somevar_{{other_var}} }}
The above DOES NOT WORK as you expect. If you need to use a dynamic variable, use the following as appropriate:
.. code-block:: jinja
{{ hostvars[inventory_hostname]['somevar_' ~ other_var] }}
For 'non host vars' you can use the :ref:`vars lookup<vars_lookup>` plugin:
.. code-block:: jinja
{{ lookup('vars', 'somevar_' ~ other_var) }}
To determine if a keyword requires ``{{ }}`` or even supports templating, use ``ansible-doc -t keyword <name>``.
This returns documentation on the keyword, including a ``template`` field with the values ``explicit`` (requires ``{{ }}``),
``implicit`` (assumes ``{{ }}``, so they are not needed) or ``static`` (no templating supported; all characters will be interpreted literally).
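For example, to check the ``environment`` keyword:

.. code-block:: shell-session

   ansible-doc -t keyword environment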
.. _ansible_host_delegated:
How do I get the original ansible_host when I delegate a task?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
As the documentation states, connection variables are taken from the ``delegate_to`` host so ``ansible_host`` is overwritten,
but you can still access the original via ``hostvars``::
original_host: "{{ hostvars[inventory_hostname]['ansible_host'] }}"
This works for all overridden connection variables, like ``ansible_user``, ``ansible_port``, and so on.
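A minimal sketch (the delegate host is illustrative):

.. code-block:: yaml

   - name: Use the original target's address while running on the delegate
     command: ping -c 1 {{ hostvars[inventory_hostname]['ansible_host'] }}
     delegate_to: localhost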
.. _scp_protocol_error_filename:
How do I fix 'protocol error: filename does not match request' when fetching a file?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Since release ``7.9p1`` of OpenSSH there is a `bug <https://bugzilla.mindrot.org/show_bug.cgi?id=2966>`_
in the SCP client that can trigger this error on the Ansible controller when using SCP as the file transfer mechanism::
failed to transfer file to /tmp/ansible/file.txt\r\nprotocol error: filename does not match request
In these releases, SCP tries to validate that the path of the file to fetch matches the requested path.
The validation
fails if the remote filename requires quotes to escape spaces or non-ascii characters in its path. To avoid this error:
* Use SFTP instead of SCP by setting ``scp_if_ssh`` to ``smart`` (which tries SFTP first) or to ``False``. You can do this in one of four ways:
* Rely on the default setting, which is ``smart`` - this works if ``scp_if_ssh`` is not explicitly set anywhere
* Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>` in inventory: ``ansible_scp_if_ssh: False``
* Set an environment variable on your control node: ``export ANSIBLE_SCP_IF_SSH=False``
* Pass an environment variable when you run Ansible: ``ANSIBLE_SCP_IF_SSH=smart ansible-playbook``
* Modify your ``ansible.cfg`` file: add ``scp_if_ssh=False`` to the ``[ssh_connection]`` section
* If you must use SCP, set the ``-T`` arg to tell the SCP client to ignore path validation. You can do this in one of three ways:
* Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>`: ``ansible_scp_extra_args=-T``,
* Export or pass an environment variable: ``ANSIBLE_SCP_EXTRA_ARGS=-T``
* Modify your ``ansible.cfg`` file: add ``scp_extra_args=-T`` to the ``[ssh_connection]`` section
.. note:: If you see an ``invalid argument`` error when using ``-T``, then your SCP client is not performing filename validation and will not trigger this error.
.. _mfa_support:
Does Ansible support multi-factor authentication 2FA/MFA/biometrics/fingerprint/usbkey/OTP/...
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
No, Ansible is designed to execute multiple tasks against multiple targets, minimizing user interaction.
As with most automation tools, it is not compatible with interactive security systems designed to handle human interaction.
Most of these systems require a secondary prompt per target, which prevents scaling to thousands of targets. They also
tend to have very short expiration periods, so they require frequent reauthorization, which is also an issue with many hosts and/or
a long set of tasks.
In such environments we recommend securing around Ansible's execution but still allowing it to use an 'automation user' that does not require such measures.
With AWX or the :ref:`Red Hat Ansible Automation Platform <ansible_platform>`, administrators can set up RBAC access to inventory, along with managing credentials and job execution.
.. _complex_configuration_validation:
The 'validate' option is not enough for my needs, what do I do?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Many Ansible modules that create or update files have a ``validate`` option that allows you to abort the update if the validation command fails.
This uses the temporary file Ansible creates before doing the final update. In many cases this does not work since the validation tools
for the specific application require either specific names, multiple files or some other factor that is not present in this simple feature.
For these cases you have to handle the validation and restoration yourself. The following is a simple example of how to do this with block/rescue
and backups, which most file based modules also support:
.. code-block:: yaml

   - name: update config and backout if validation fails
     block:
       - name: do the actual update; works with copy, lineinfile, and any action that allows for `backup`
         template: src=template.j2 dest=/x/y/z backup=yes moreoptions=stuff
         register: updated

       - name: run validation; this will vary as needed. We assume it returns an error when not passing; use `failed_when` otherwise
         shell: run_validation_command
         become: yes
         become_user: requiredbyapp
         environment:
           WEIRD_REQUIREMENT: 1

     rescue:
       - name: restore backup file to original, in the hope the previous configuration was working
         copy:
           remote_src: yes
           dest: /x/y/z
           src: "{{ updated['backup_file'] }}"

     always:
       - name: we choose to always delete the backup, but could copy or move it, or only delete in rescue
         file:
           path: "{{ updated['backup_file'] }}"
           state: absent
.. _jinja2_faqs:
Why does the ``regex_search`` filter return `None` instead of an empty string?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Until the jinja2 2.10 release, Jinja was only able to return strings, but Ansible needed Python objects in some cases. Ansible uses ``safe_eval`` and only sends strings that look like certain types of Python objects through this function. With ``regex_search`` that does not find a match, the result (``None``) is converted to the string "None" which is not useful in non-native jinja2.
The following example of a single templating action shows this behavior:
.. code-block:: Jinja
{{ 'ansible' | regex_search('foobar') }}
With non-native jinja2, this example does not result in a Python ``None``; Ansible historically converted the result to "" (an empty string).
The native jinja2 functionality actually allows us to return full Python objects, that are always represented as Python objects everywhere, and as such the result of a single templating action with ``regex_search`` can result in the Python ``None``.
.. note::
Native jinja2 functionality is not needed when ``regex_search`` is used as an intermediate result that is then compared to the jinja2 ``none`` test.
.. code-block:: Jinja
{{ 'ansible' | regex_search('foobar') is none }}
.. _docs_contributions:
How do I submit a change to the documentation?
++++++++++++++++++++++++++++++++++++++++++++++
Documentation for Ansible is kept in the main project git repository, and complete instructions
for contributing can be found in the docs README `viewable on GitHub <https://github.com/ansible/ansible/blob/devel/docs/docsite/README.md>`_. Thanks!
.. _i_dont_see_my_question:
I don't see my question here
++++++++++++++++++++++++++++
If you have not found an answer to your questions, you can ask on one of our mailing lists or chat channels. For instructions on subscribing to a list or joining a chat channel, see :ref:`communication`.
.. seealso::
:ref:`working_with_playbooks`
An introduction to playbooks
:ref:`playbooks_best_practices`
Tips and tricks for playbooks
`User Mailing List <https://groups.google.com/group/ansible-project>`_
Have a question? Stop by the google group!
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,556 |
Banner about 2.9 on latest docs is misleading
|
### Summary
Feeback: - the line "Red Hat subscribers, select 2.9 in the version selection to the left for the most recent Red Hat release." is confusing, because 2.9 is not the latest release but rather the most recent release prior to the core/collections split.
### Issue Type
Documentation Report
### Component Name
docs/docsite/sphinx_conf/ansible_conf.py
### Ansible Version
```console
$ ansible --version
2.13
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
none
```
### OS / Environment
none
### Additional Information
none
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78556
|
https://github.com/ansible/ansible/pull/78563
|
33ee5d4c1b8ea5357e29749a71f55d8f03642b9e
|
2fdaee143a88e1c63c7dc56f06a114bf31380683
| 2022-08-16T14:37:23Z |
python
| 2022-08-17T15:56:49Z |
docs/docsite/.templates/banner.html
|
{% if is_eol %}
{# Creates a banner at the top of the page for EOL versions. #}
<div id='banner' class='admonition caution'>
<p>You are reading an unmaintained version of the Ansible documentation. Unmaintained Ansible versions can contain unfixed security vulnerabilities (CVE). Please upgrade to a maintained version. See <a href="/ansible/latest/">the latest Ansible documentation</a>.</p>
</div>
{% else %}
<script>
function startsWith(str, needle) {
    return str.slice(0, needle.length) == needle;
}

function startsWithOneOf(str, needles) {
    return needles.some(function (needle) {
        return startsWith(str, needle);
    });
}
var banner = '';

/* Use extra_banner for when marketing wants something extra, like a survey or AnsibleFest notice. */
var extra_banner =
    '<div id="latest_extra_banner_id" class="admonition important">' +
    '<br>' +
    '<p>' +
    'Immerse yourself in the automation experience October 18-19 in Chicago. ' +
    '</p>' +
    '<p>' +
    'Join us for AnsibleFest 2022. ' +
    '<a href="https://www.ansible.com/ansiblefest?intcmp=7013a000002tuDxAAI">Register now!</a> ' +
    '</p>' +
    '<br>' +
    '</div>';
// Create a banner if we're not on the official docs site
if (location.host == "docs.testing.ansible.com") {
    document.write('<div id="testing_banner_id" class="admonition important">' +
        '<p>This is the testing site for Ansible Documentation. Unless you are reviewing pre-production changes, please visit the <a href="https://docs.ansible.com/ansible/latest/">official documentation website</a>.</p> <p></p>' +
        '</div>');
}
{% if available_versions is defined %}
// Create a banner
current_url_path = window.location.pathname;
var important = false;
var msg = '<p>';

if (startsWith(current_url_path, "/ansible-core/")) {
    msg += 'You are reading documentation for Ansible Core, which contains no plugins except for those in ansible.builtin. For documentation of the Ansible package, go to <a href="/ansible/latest">the latest documentation</a>.';
} else if (startsWithOneOf(current_url_path, ["/ansible/latest/", "/ansible/{{ latest_version }}/"])) {
    /* temp extra banner to advertise AnsibleFest 2022 */
    banner += extra_banner;
    msg += 'You are reading the <b>latest</b> (stable) community version of the Ansible documentation. Red Hat subscribers, select <b>2.9</b> in the version selection to the left for the most recent Red Hat release.';
} else if (startsWith(current_url_path, "/ansible/2.9/")) {
    msg += 'You are reading the latest Red Hat released version of the Ansible documentation. Community users can use this version, or select <b>latest</b> from the version selector to the left for the most recent community version.';
} else if (startsWith(current_url_path, "/ansible/devel/")) {
    /* temp extra banner to advertise AnsibleFest 2022 */
    banner += extra_banner;
    /* temp banner to advertise survey
    important = true;
    msg += 'Please take our <a href="https://www.surveymonkey.co.uk/r/B9V3CDY">Docs survey</a> before December 31 to help us improve Ansible documentation.';
    */
    msg += 'You are reading the <b>devel</b> version of the Ansible documentation - this version is not guaranteed stable. Use the version selection to the left if you want the <b>latest</b> (stable) released version.';
} else {
    msg += 'You are reading an older version of the Ansible documentation. Use the version selection to the left if you want the <b>latest</b> (stable) released version.';
}
msg += '</p>';

banner += '<div id="banner_id" class="admonition ';
banner += important ? 'important' : 'caution';
banner += '">';
banner += important ? '<br>' : '';
banner += msg;
banner += important ? '<br>' : '';
banner += '</div>';
document.write(banner);
{% endif %}
</script>
{% endif %}
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,364 |
yum_repository module adds deprecated options on centos/rhel 8
|
### Summary
Adding a yum repository to a centos/rhel 8 system with yum_repository module adds async option (deprecated) to repository-file.
Inspecting /var/log/dnf.log on centos/rhel 8 after running `dnf repolist` produces the following errors.
```
2021-07-30T10:38:19+0200 DEBUG Unknown configuration option: async = 0 in /etc/yum.repos.d/epel.repo
```
Seems yum_repository module should omit the async option when target system is centos/rhel8?
best regards, Mikael
### Issue Type
Bug Report
### Component Name
yum_repository
### Ansible Version
```console
$ ansible --version
config file = None
configured module search path = ['/home/$USER/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.9/site-packages/ansible
ansible collection location = /ansible-collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.9.5 (default, Jun 23 2021, 15:10:59) [GCC 8.3.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
none
```
### OS / Environment
CentOS Linux release 8.4.2105
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: all
become: true
tasks:
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
```
### Expected Results
```
TASK [Add repository] ***********************************************************
--- before: /etc/yum.repos.d/epel.repo
+++ after: /etc/yum.repos.d/epel.repo
@@ -0,0 +1,6 @@
+[epel]
+baseurl = https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+gpgcheck = 0
+name = EPEL YUM repo
+
changed: [centos8-host]
```
### Actual Results
```console
TASK [Add repository] ***********************************************************
--- before: /etc/yum.repos.d/epel.repo
+++ after: /etc/yum.repos.d/epel.repo
@@ -0,0 +1,6 @@
+[epel]
+async = 1
+baseurl = https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+gpgcheck = 0
+name = EPEL YUM repo
+
changed: [centos8-host]
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75364
|
https://github.com/ansible/ansible/pull/76750
|
bf1ef5a1f3562c9a59168adbc78750304c3e4309
|
2e8082ebb0faf042a26f5c51fa70b9ce0a819f43
| 2021-07-30T08:41:02Z |
python
| 2022-08-17T17:56:24Z |
changelogs/fragments/75364-yum-repository-async.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,364 |
yum_repository module adds deprecated options on centos/rhel 8
|
### Summary
Adding a yum repository to a centos/rhel 8 system with yum_repository module adds async option (deprecated) to repository-file.
Inspecting /var/log/dnf.log on centos/rhel 8 after running `dnf repolist` produces the following errors.
```
2021-07-30T10:38:19+0200 DEBUG Unknown configuration option: async = 0 in /etc/yum.repos.d/epel.repo
```
Seems yum_repository module should omit the async option when target system is centos/rhel8?
best regards, Mikael
### Issue Type
Bug Report
### Component Name
yum_repository
### Ansible Version
```console
$ ansible --version
config file = None
configured module search path = ['/home/$USER/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.9/site-packages/ansible
ansible collection location = /ansible-collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.9.5 (default, Jun 23 2021, 15:10:59) [GCC 8.3.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
none
```
### OS / Environment
CentOS Linux release 8.4.2105
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: all
become: true
tasks:
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
```
### Expected Results
```
TASK [Add repository] ***********************************************************
--- before: /etc/yum.repos.d/epel.repo
+++ after: /etc/yum.repos.d/epel.repo
@@ -0,0 +1,6 @@
+[epel]
+baseurl = https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+gpgcheck = 0
+name = EPEL YUM repo
+
changed: [centos8-host]
```
### Actual Results
```console
TASK [Add repository] ***********************************************************
--- before: /etc/yum.repos.d/epel.repo
+++ after: /etc/yum.repos.d/epel.repo
@@ -0,0 +1,6 @@
+[epel]
+async = 1
+baseurl = https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+gpgcheck = 0
+name = EPEL YUM repo
+
changed: [centos8-host]
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75364
|
https://github.com/ansible/ansible/pull/76750
|
bf1ef5a1f3562c9a59168adbc78750304c3e4309
|
2e8082ebb0faf042a26f5c51fa70b9ce0a819f43
| 2021-07-30T08:41:02Z |
python
| 2022-08-17T17:56:24Z |
lib/ansible/modules/yum_repository.py
|
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add or remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
- If you wish to update an existing repository definition use M(community.general.ini_file) instead.
options:
async:
description:
- If set to C(yes) Yum will download packages and metadata from this
repo in parallel, if possible.
type: bool
default: 'yes'
bandwidth:
description:
- Maximum available network bandwidth in bytes/second. Used with the
I(throttle) option.
- If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
type: str
default: '0'
baseurl:
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- It can also be a list of multiple URLs.
- This, the I(metalink) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
type: list
elements: str
cost:
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
type: str
default: '1000'
deltarpm_metadata_percentage:
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
type: str
default: '100'
deltarpm_percentage:
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
type: str
default: '75'
description:
description:
- A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
- This parameter is only required if I(state) is set to C(present).
type: str
enabled:
description:
- This tells yum whether or not to use this repository.
- Yum default value is C(true).
type: bool
enablegroups:
description:
- Determines whether yum will allow the use of package groups for this
repository.
- Yum default value is C(true).
type: bool
exclude:
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
type: list
elements: str
failovermethod:
choices: [roundrobin, priority]
default: roundrobin
description:
- C(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
type: str
file:
description:
- File name without the C(.repo) extension to save the repo in. Defaults
to the value of I(name).
type: str
gpgcakey:
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
type: str
gpgcheck:
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
- No default setting. If the value is not set, the system setting from
C(/etc/yum.conf) or system default of C(no) will be used.
type: bool
gpgkey:
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
- It can also be a list of multiple URLs.
type: list
elements: str
module_hotfixes:
description:
- Disable module RPM filtering and make all RPMs from the repository
available. The default is C(None).
version_added: '2.11'
type: bool
http_caching:
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- C(all) means that all HTTP downloads should be cached.
- C(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
choices: [all, packages, none]
type: str
default: all
include:
description:
- Include external configuration file. Both a local path and a URL are
supported. The configuration file will be inserted at the position of the
I(include=) line. Included files may contain further include lines.
Yum will abort with an error if an inclusion loop is detected.
type: str
includepkgs:
description:
- List of packages you want to only use from a repository. This should be
a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
type: list
elements: str
ip_resolve:
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
choices: ['4', '6', IPv4, IPv6, whatever]
type: str
default: whatever
keepalive:
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
type: bool
default: 'no'
keepcache:
description:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
choices: ['0', '1']
type: str
default: '1'
metadata_expire:
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
type: str
default: '21600'
metadata_expire_filter:
description:
- Filter the I(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
repos. from "I'm about to install/upgrade, so this better be current"
to "Anything that's available is good enough".
- C(never) - Nothing is filtered, always obey I(metadata_expire).
- C(read-only:past) - Commands that only care about past information are
filtered from metadata expiring. Eg. I(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- C(read-only:present) - Commands that are balanced between past and
future. Eg. I(yum list yum).
- C(read-only:future) - Commands that are likely to result in running
other commands which will require the latest metadata. Eg.
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
type: str
default: 'read-only:present'
metalink:
description:
- Specifies a URL to a metalink file for the repomd.xml, a list of
mirrors for the entire repository are generated by converting the
mirrors for the repomd.xml file to a I(baseurl).
- This, the I(baseurl) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
type: str
mirrorlist:
description:
- Specifies a URL to a file containing a list of baseurls.
- This, the I(baseurl) or I(metalink) parameters are required if I(state) is set to
C(present).
type: str
mirrorlist_expire:
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
type: str
default: '21600'
name:
description:
- Unique repository ID. This option builds the section name of the repository in the repo file.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
type: str
required: true
password:
description:
- Password to use with the username for basic authentication.
type: str
priority:
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
type: str
default: '99'
protect:
description:
- Protect packages from updates from other repositories.
type: bool
default: 'no'
proxy:
description:
- URL to the proxy server that yum should use. Set to C(_none_) to
disable the global proxy setting.
type: str
proxy_password:
description:
- Password for this proxy.
type: str
proxy_username:
description:
- Username to use for proxy.
type: str
repo_gpgcheck:
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
type: bool
default: 'no'
reposdir:
description:
- Directory where the C(.repo) files will be stored.
type: path
default: /etc/yum.repos.d
retries:
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
type: str
default: '10'
s3_enabled:
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
type: bool
default: 'no'
skip_if_unavailable:
description:
- If set to C(yes) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
type: bool
default: 'no'
ssl_check_cert_permissions:
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
I(skip_if_unavailable) to be C(yes). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
type: bool
default: 'no'
sslcacert:
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
type: str
aliases: [ ca_cert ]
sslclientcert:
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
type: str
aliases: [ client_cert ]
sslclientkey:
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
type: str
aliases: [ client_key ]
sslverify:
description:
- Defines whether yum should verify SSL certificates/hosts at all.
type: bool
default: 'yes'
aliases: [ validate_certs ]
state:
description:
- State of the repo file.
choices: [absent, present]
type: str
default: present
throttle:
description:
- Enable bandwidth throttling for downloads.
- This option can be expressed as an absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
type: str
timeout:
description:
- Number of seconds to wait for a connection before timing out.
type: str
default: '30'
ui_repoid_vars:
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
type: str
default: releasever basearch
username:
description:
- Username to use for basic authentication to a repo or really any url.
type: str
extends_documentation_fragment:
- action_common_attributes
- files
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
- When removing a repository, beware that the metadata cache may still remain
on disk until you run C(yum clean all). Use a notification handler for this.
- "The C(params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
handling"
'''
EXAMPLES = '''
- name: Add repository
ansible.builtin.yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
ansible.builtin.yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
ansible.builtin.yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
# Handler showing how to clean yum metadata cache
- name: yum-clean-metadata
ansible.builtin.command: yum clean metadata
# Example removing a repository and cleaning up metadata cache
- name: Remove repository (and clean up left-over metadata)
ansible.builtin.yum_repository:
name: epel
state: absent
notify: yum-clean-metadata
- name: Remove repository from a specific repo file
ansible.builtin.yum_repository:
name: epel
file: external_repos
state: absent
'''
RETURN = '''
repo:
description: repository name
returned: success
type: str
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: str
sample: "present"
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
class YumRepo(object):
# Class global variables
module = None
params = None
section = None
repofile = configparser.RawConfigParser()
# List of parameters which will be allowed in the repo file output
allowed_params = [
'async',
'bandwidth',
'baseurl',
'cost',
'deltarpm_metadata_percentage',
'deltarpm_percentage',
'enabled',
'enablegroups',
'exclude',
'failovermethod',
'gpgcakey',
'gpgcheck',
'gpgkey',
'module_hotfixes',
'http_caching',
'include',
'includepkgs',
'ip_resolve',
'keepalive',
'keepcache',
'metadata_expire',
'metadata_expire_filter',
'metalink',
'mirrorlist',
'mirrorlist_expire',
'name',
'password',
'priority',
'protect',
'proxy',
'proxy_password',
'proxy_username',
'repo_gpgcheck',
'retries',
's3_enabled',
'skip_if_unavailable',
'sslcacert',
'ssl_check_cert_permissions',
'sslclientcert',
'sslclientkey',
'sslverify',
'throttle',
'timeout',
'ui_repoid_vars',
'username']
# List of parameters which can be a list
list_params = ['exclude', 'includepkgs']
def __init__(self, module):
# To be able to use fail_json
self.module = module
# Shortcut for the params
self.params = self.module.params
# Section is always the repoid
self.section = self.params['repoid']
# Check if repo directory exists
repos_dir = self.params['reposdir']
if not os.path.isdir(repos_dir):
self.module.fail_json(
msg="Repo directory '%s' does not exist." % repos_dir)
# Set dest; also used to set dest parameter for the FS attributes
self.params['dest'] = os.path.join(
repos_dir, "%s.repo" % self.params['file'])
# Read the repo file if it exists
if os.path.isfile(self.params['dest']):
self.repofile.read(self.params['dest'])
def add(self):
# Remove already existing repo and create a new one
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
# Add section
self.repofile.add_section(self.section)
# Baseurl/mirrorlist is not required because for removal we need only
# the repo name. This is why we check if the baseurl/mirrorlist is
# defined.
req_params = (self.params['baseurl'], self.params['metalink'], self.params['mirrorlist'])
if req_params == (None, None, None):
self.module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required for "
"adding a new repo.")
# Set options
for key, value in sorted(self.params.items()):
if key in self.list_params and isinstance(value, list):
# Join items into one string for specific parameters
value = ' '.join(value)
elif isinstance(value, bool):
# Convert boolean value to integer
value = int(value)
# Set the value only if it was defined (default is None)
if value is not None and key in self.allowed_params:
self.repofile.set(self.section, key, value)
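# Note (descriptive, not upstream): any module default that is not None
# (e.g. the async option, which defaults to True in the argument spec in
# main() below) passes the check above and is therefore always written to
# the repo file, even on dnf-based systems that no longer understand it --
# this is the behaviour reported in #75364.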
def save(self):
if len(self.repofile.sections()):
# Write data into the file
try:
with open(self.params['dest'], 'w') as fd:
self.repofile.write(fd)
except IOError as e:
self.module.fail_json(
msg="Problems handling file %s." % self.params['dest'],
details=to_native(e))
else:
# Remove the file if there are no repos
try:
os.remove(self.params['dest'])
except OSError as e:
self.module.fail_json(
msg=(
"Cannot remove empty repo file %s." %
self.params['dest']),
details=to_native(e))
def remove(self):
# Remove section if exists
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
def dump(self):
repo_string = ""
# Compose the repo file
for section in sorted(self.repofile.sections()):
repo_string += "[%s]\n" % section
for key, value in sorted(self.repofile.items(section)):
repo_string += "%s = %s\n" % (key, value)
repo_string += "\n"
return repo_string
def main():
# Module settings
argument_spec = dict(
bandwidth=dict(),
baseurl=dict(type='list', elements='str'),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(type='list', elements='str'),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(no_log=False),
gpgcheck=dict(type='bool'),
gpgkey=dict(type='list', elements='str', no_log=False),
module_hotfixes=dict(type='bool'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(type='list', elements='str'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(required=True),
params=dict(type='dict'),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d', type='path'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(aliases=['ca_cert']),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(aliases=['client_cert']),
sslclientkey=dict(aliases=['client_key'], no_log=False),
sslverify=dict(type='bool', aliases=['validate_certs']),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
)
argument_spec['async'] = dict(type='bool', default=True)
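# A plausible fix for #75364 (an assumption, not necessarily the shape of
# the merged patch): default 'async' to None here so the key is only
# emitted to the repo file when the user sets it explicitly.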
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Params was removed
# https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
if module.params['params']:
module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['metalink'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Change list type to string for baseurl and gpgkey
for list_param in ['baseurl', 'gpgkey']:
if (
list_param in module.params and
module.params[list_param] is not None):
module.params[list_param] = "\n".join(module.params[list_param])
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
diff = {
'before_header': yumrepo.params['dest'],
'before': yumrepo.dump(),
'after_header': yumrepo.params['dest'],
'after': ''
}
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
diff['after'] = yumrepo.dump()
# Compare repo states
changed = diff['before'] != diff['after']
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state, diff=diff)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,069 |
ansible/builtin/dnf_module: Parameters document "list" but no examples given
|
### Summary
In ansible/builtin/dnf_module, the parameter section documents the parameter `list` with:
> "Various (non-idempotent) commands for usage with /usr/bin/ansible and not playbooks. See examples."
but no examples of this parameter's usage are given.
### Issue Type
Documentation Report
### Component Name
collections/ansible/builtin/dnf_module
### Ansible Version
```console
This is a documentation bug in the current online version, and it is present regardless of my installed Ansible version.
See: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/dnf_module.html#ansible-collections-ansible-builtin-dnf-module
```
### Configuration
```console
This is a documentation bug in the current online version, and it is present regardless of my installed Ansible version.
See: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/dnf_module.html#ansible-collections-ansible-builtin-dnf-module
```
### OS / Environment
This is a documentation bug in the current online version, and it is present regardless of my OS environment.
See: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/dnf_module.html#ansible-collections-ansible-builtin-dnf-module
### Additional Information
Providing examples of how to sensibly use the `list` parameter of the dnf module would make it usable. Otherwise the parameter list contains a dangling "See examples" reference with no examples explaining the usage.
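For what it is worth, here is a sketch of the kind of ad-hoc calls the parameter appears to be meant for, inferred from the module's `list_items()` implementation rather than from any published example:
```console
ansible localhost -m ansible.builtin.dnf -a "list=installed"
ansible localhost -m ansible.builtin.dnf -a "list=updates"
ansible localhost -m ansible.builtin.dnf -a "list=repos"
ansible localhost -m ansible.builtin.dnf -a "list=httpd"
```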
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78069
|
https://github.com/ansible/ansible/pull/78484
|
2e8082ebb0faf042a26f5c51fa70b9ce0a819f43
|
8dc097989b88f4f4bf40434016faabbeb5e25d5f
| 2022-06-16T16:22:16Z |
python
| 2022-08-17T17:59:55Z |
lib/ansible/modules/dnf.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <[email protected]>
# Copyright 2018 Adam Miller <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
options:
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
When using state=latest, this can be '*' which means run: dnf -y update.
You can also pass a url or a local path to a rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0).
Spaces around the operator are required.
- You can also pass an absolute path for a binary which is provided by the package to install.
See examples for more information.
required: true
aliases:
- pkg
type: list
elements: str
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
type: str
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
elements: str
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
type: str
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
default: 'no'
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
version_added: "2.3"
default: "/"
type: str
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
version_added: "2.6"
type: str
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
type: bool
default: "no"
version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
type: list
elements: str
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
without raising an error. Equivalent to passing the --skip-broken option.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
- Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
type: list
elements: str
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in dnf.conf.
- If set to C(repoid), disable excludes defined for given repo id.
version_added: "2.7"
type: str
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
sslverify:
description:
- Disables SSL validation of the repository server for this transaction.
- This should be set to C(no) if one of the configured repositories is using an untrusted or self-signed certificate.
type: bool
default: "yes"
version_added: "2.13"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.7"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: "yes"
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the dnf lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
type: bool
default: "yes"
version_added: "2.8"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if I(download_only) is specified.
type: str
version_added: "2.8"
allowerasing:
description:
- If C(yes) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
version_added: "2.10"
nobest:
description:
- Set best option to False, so that transactions are not limited to best candidates only.
required: false
type: bool
default: "no"
version_added: "2.11"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
type: bool
default: "no"
version_added: "2.12"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
details: In the case of dnf, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
support: partial
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
notes:
- When used with a C(loop:) each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
(https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
requirements:
- "python >= 2.6"
- python-dnf
- for the autoremove option you need dnf >= 2.0.1
author:
- Igor Gnatenko (@ignatenkobrain) <[email protected]>
- Cristian van Ee (@DJMuggs) <cristian at cvee.org>
- Berend De Schouwer (@berenddeschouwer)
- Adam Miller (@maxamillion) <[email protected]>
'''
EXAMPLES = '''
- name: Install the latest version of Apache
ansible.builtin.dnf:
name: httpd
state: latest
- name: Install Apache >= 2.4
ansible.builtin.dnf:
name: httpd >= 2.4
state: present
- name: Install the latest version of Apache and MariaDB
ansible.builtin.dnf:
name:
- httpd
- mariadb-server
state: latest
- name: Remove the Apache package
ansible.builtin.dnf:
name: httpd
state: absent
- name: Install the latest version of Apache from the testing repo
ansible.builtin.dnf:
name: httpd
enablerepo: testing
state: present
- name: Upgrade all packages
ansible.builtin.dnf:
name: "*"
state: latest
- name: Install the nginx rpm from a remote repo
ansible.builtin.dnf:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: Install nginx rpm from a local file
ansible.builtin.dnf:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install Package based upon the file it provides
ansible.builtin.dnf:
name: /usr/bin/cowsay
state: present
- name: Install the 'Development tools' package group
ansible.builtin.dnf:
name: '@Development tools'
state: present
- name: Autoremove unneeded packages installed as dependencies
ansible.builtin.dnf:
autoremove: yes
- name: Uninstall httpd but keep its dependencies
ansible.builtin.dnf:
name: httpd
state: absent
autoremove: no
- name: Install a modularity appstream with defined stream and profile
ansible.builtin.dnf:
name: '@postgresql:9.6/client'
state: present
- name: Install a modularity appstream with defined stream
ansible.builtin.dnf:
name: '@postgresql:9.6'
state: present
- name: Install a modularity appstream with defined profile
ansible.builtin.dnf:
name: '@postgresql/client'
state: present
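# Sketch only (illustrative, not the wording of any merged documentation):
# the non-idempotent 'list' option is intended for ad-hoc /usr/bin/ansible
# use rather than playbooks, e.g.
#   ansible webservers -m ansible.builtin.dnf -a "list=installed"
#   ansible webservers -m ansible.builtin.dnf -a "list=nginx"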
'''
import os
import re
import sys
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_file
from ansible.module_utils.six import PY2, text_type
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
# NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
# because we need AnsibleModule object to use get_best_parsable_locale()
# to set proper locale before importing dnf to be able to scrape
# the output in some cases (FIXME?).
dnf = None
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
self.lockfile = "/var/cache/dnf/*_lock.pid"
self.pkg_mgr_name = "dnf"
try:
self.with_modules = dnf.base.WITH_MODULES
except AttributeError:
self.with_modules = False
# DNF specific args that are not part of YumDnf
self.allowerasing = self.module.params['allowerasing']
self.nobest = self.module.params['nobest']
def is_lockfile_pid_valid(self):
# FIXME? it looks like DNF takes care of invalid lock files itself?
# https://github.com/ansible/ansible/issues/57189
return True
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to filter in an install scenario. Do that here.
"""
if (
to_text("no package matched") in to_text(error) or
to_text("No match for argument:") in to_text(error)
):
return "No package {0} available.".format(spec)
return error
def _sanitize_dnf_error_msg_remove(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to ignore in a removal scenario as known benign
failures. Do that here.
"""
if (
'no package matched' in to_native(error) or
'No match for argument:' in to_native(error)
):
return (False, "{0} is not installed".format(spec))
# Return value is tuple of:
# ("Is this actually a failure?", "Error Message")
return (True, error)
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
# envra format for alignment with the yum module
result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result)
# keep nevra key for backwards compat as it was previously
# defined with a value in envra format
result['nevra'] = result['envra']
if package.installtime == 0:
result['yumstate'] = 'available'
else:
result['yumstate'] = 'installed'
return result
def _split_package_arch(self, packagename):
# This list was auto generated on a Fedora 28 system with the following one-liner
# printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
redhat_rpm_arches = [
"aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
"alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
"armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
"geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
"mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
"noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
"ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
"riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
"sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
]
name, delimiter, arch = packagename.rpartition('.')
if name and arch and arch in redhat_rpm_arches:
return name, arch
return packagename, None
def _packagename_dict(self, packagename):
"""
Return a dictionary of information for a package name string or None
if the package name doesn't contain at least all NVR elements
"""
if packagename[-4:] == '.rpm':
packagename = packagename[:-4]
rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
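# Illustration (added for readability, not upstream): for "bash-0:5.0-2.fc30"
# the captured groups are name="bash", epoch="0", version="5.0",
# release="2.fc30"; the epoch group is optional.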
try:
arch = None
nevr, arch = self._split_package_arch(packagename)
if arch:
packagename = nevr
rpm_nevr_match = rpm_nevr_re.match(packagename)
if rpm_nevr_match:
name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
if not version or not version.split('.')[0].isdigit():
return None
else:
return None
except AttributeError as e:
self.module.fail_json(
msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
rc=1,
results=[]
)
if not epoch:
epoch = "0"
if ':' in name:
epoch_name = name.split(":")
epoch = epoch_name[0]
name = ''.join(epoch_name[1:])
result = {
'name': name,
'epoch': epoch,
'release': release,
'version': version,
}
return result
# Original implementation from yum.rpmUtils.miscutils (GPLv2+)
# http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
def _compare_evr(self, e1, v1, r1, e2, v2, r2):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
if e1 is None:
e1 = '0'
else:
e1 = str(e1)
v1 = str(v1)
r1 = str(r1)
if e2 is None:
e2 = '0'
else:
e2 = str(e2)
v2 = str(v2)
r2 = str(r2)
# print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
# print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
return rc
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
os.environ['LANGUAGE'] = os.environ['LANG'] = locale
global dnf
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
if HAS_DNF:
return
system_interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
'/usr/bin/python2',
'/usr/bin/python']
if not has_respawned():
# probe well-known system Python locations for accessible bindings, favoring py3
interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the dnf python module using {0} ({1}). "
"Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})"
.format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
results=[]
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/', sslverify=True):
"""Configure the dnf Base object."""
conf = base.conf
# Change the configuration file path if provided, this must be done before conf.read() is called
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file,
results=[],
)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
conf.localpkg_gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set certificate validation
conf.sslverify = sslverify
# Set installroot
conf.installroot = installroot
# Load substitutions from the filesystem
conf.substitutions.update_from_etc(installroot)
# Handle immutable vs. mutable datatypes across different DNF versions
# (dnf v1/v2/v3)
#
# In DNF < 3.0 these are lists, and modifying them works
# In DNF >= 3.0, < 3.6 they are lists, but modifying them doesn't work
# In DNF >= 3.6 they have been turned into tuples, to communicate that modifying them doesn't work
#
# https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
#
# Set excludes
if self.exclude:
_excludes = list(conf.exclude)
_excludes.extend(self.exclude)
conf.exclude = _excludes
# Set disable_excludes
if self.disable_excludes:
_disable_excludes = list(conf.disable_excludes)
if self.disable_excludes not in _disable_excludes:
_disable_excludes.append(self.disable_excludes)
conf.disable_excludes = _disable_excludes
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
if conf.substitutions.get('releasever') is None:
self.module.warn(
'Unable to detect release version (use "releasever" option to specify release version)'
)
# values of conf.substitutions are expected to be strings
# setting this to an empty string instead of None appears to mimic the DNF CLI behavior
conf.substitutions['releasever'] = ''
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
# Set best
if self.nobest:
conf.best = 0
if self.download_only:
conf.downloadonly = True
if self.download_dir:
conf.destdir = self.download_dir
if self.cacheonly:
conf.cacheonly = True
# Default in dnf upstream is true
conf.clean_requirements_on_remove = self.autoremove
# Default in dnf (and module default) is True
conf.install_weak_deps = self.install_weak_deps
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
if repo_pattern:
for repo in repos.get_matching(repo_pattern):
repo.enable()
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
try:
# this method has been supported in dnf-4.2.17-6 or later
# https://bugzilla.redhat.com/show_bug.cgi?id=1788212
base.setup_loggers()
except AttributeError:
pass
try:
base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
base.pre_configure_plugins()
except AttributeError:
pass # older versions of dnf didn't require this and don't have these methods
self._specify_repositories(base, disablerepo, enablerepo)
try:
base.configure_plugins()
except AttributeError:
pass # older versions of dnf didn't require this and don't have these methods
try:
if self.update_cache:
try:
base.update_cache()
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
base.fill_sack(load_system_repo='auto')
except dnf.exceptions.RepoError as e:
self.module.fail_json(
msg="{0}".format(to_text(e)),
results=[],
rc=1
)
add_security_filters = getattr(base, "add_security_filters", None)
if callable(add_security_filters):
filters = {}
if self.bugfix:
filters.setdefault('types', []).append('bugfix')
if self.security:
filters.setdefault('types', []).append('security')
if filters:
add_security_filters('eq', **filters)
else:
filters = []
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
filters.append(base.sack.query().upgrades().filter(**key))
if self.security:
key = {'advisory_type__eq': 'security'}
filters.append(base.sack.query().upgrades().filter(**key))
if filters:
base._update_security_filters = filters
return base
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
self.module.exit_json(msg="", results=results)
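# Descriptive note (not upstream): the recognized 'command' values handled
# above are 'installed', 'upgrades' (with 'updates' accepted as an alias),
# 'available', 'repos'/'repositories', or any package spec, which is
# resolved via dnf.subject.Subject.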
def _is_installed(self, pkg):
installed = self.base.sack.query().installed()
package_spec = {}
name, arch = self._split_package_arch(pkg)
if arch:
package_spec['arch'] = arch
package_details = self._packagename_dict(pkg)
if package_details:
package_details['epoch'] = int(package_details['epoch'])
package_spec.update(package_details)
else:
package_spec['name'] = name
return bool(installed.filter(**package_spec))
def _is_newer_version_installed(self, pkg_name):
candidate_pkg = self._packagename_dict(pkg_name)
if not candidate_pkg:
# The user didn't provide a versioned rpm, so version checking is
# not required
return False
installed = self.base.sack.query().installed()
installed_pkg = installed.filter(name=candidate_pkg['name']).run()
if installed_pkg:
installed_pkg = installed_pkg[0]
# this looks weird but one is a dict and the other is a dnf.Package
evr_cmp = self._compare_evr(
installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
)
return evr_cmp == 1
else:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
is_installed = self._is_installed(pkg_spec)
try:
if is_newer_version_installed:
if self.allow_downgrade:
# dnf only does allow_downgrade, we have to handle this ourselves
# because it allows a possibility for non-idempotent transactions
# on a system's package set (pending the yum repo has many old
# NVRs indexed)
if upgrade:
if is_installed: # Case 1
# TODO: Is this case reachable?
#
# _is_installed() demands a name (*not* NVR) or else is always False
# (wildcards are treated literally).
#
# Meanwhile, _is_newer_version_installed() demands something versioned
# or else is always false.
#
# I fail to see how they can both be true at the same time for any
# given pkg_spec. -re
self.base.upgrade(pkg_spec)
else: # Case 2
self.base.install(pkg_spec, strict=self.base.conf.strict)
else: # Case 3
self.base.install(pkg_spec, strict=self.base.conf.strict)
else: # Case 4, Nothing to do, report back
pass
elif is_installed: # A potentially older (or same) version is installed
if upgrade: # Case 5
self.base.upgrade(pkg_spec)
else: # Case 6, Nothing to do, report back
pass
else: # Case 7, The package is not installed, simply install it
self.base.install(pkg_spec, strict=self.base.conf.strict)
return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
except dnf.exceptions.MarkingError as e:
return {
'failed': True,
'msg': "No package {0} available.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.DepsolveError as e:
return {
'failed': True,
'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
return {'failed': False, 'msg': '', 'failure': ''}
else:
return {
'failed': True,
'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
def _whatprovides(self, filepath):
self.base.read_all_repos()
available = self.base.sack.query().available()
# Search in file
files_filter = available.filter(file=filepath)
# And Search in provides
pkg_spec = files_filter.union(available.filter(provides=filepath)).run()
if pkg_spec:
return pkg_spec[0].name
def _parse_spec_group_file(self):
pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
already_loaded_comps = False # Only load this if necessary, it's slow
for name in self.names:
if '://' in name:
name = fetch_file(self.module, name)
filenames.append(name)
elif name.endswith(".rpm"):
filenames.append(name)
elif name.startswith('/'):
# like "dnf install /usr/bin/vi"
pkg_spec = self._whatprovides(name)
if pkg_spec:
pkg_specs.append(pkg_spec)
continue
elif name.startswith("@") or ('/' in name):
if not already_loaded_comps:
self.base.read_comps()
already_loaded_comps = True
grp_env_mdl_candidate = name[1:].strip()
if self.with_modules:
mdl = self.module_base._get_modules(grp_env_mdl_candidate)
if mdl[0]:
module_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
grp_specs.append(grp_env_mdl_candidate)
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, module_specs, filenames
def _update_only(self, pkgs):
not_installed = []
for pkg in pkgs:
if self._is_installed(pkg):
try:
if isinstance(to_text(pkg), text_type):
self.base.upgrade(pkg)
else:
self.base.package_upgrade(pkg)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
else:
not_installed.append(pkg)
return not_installed
def _install_remote_rpms(self, filenames):
if int(dnf.__version__.split(".")[0]) >= 2:
pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
else:
pkgs = []
try:
for filename in filenames:
pkgs.append(self.base.add_remote_rpm(filename))
except IOError as e:
if to_text("Can not load RPM file") in to_text(e):
self.module.fail_json(
msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
results=[],
rc=1,
)
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs:
try:
if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
if self.allow_downgrade:
self.base.package_install(pkg, strict=self.base.conf.strict)
else:
self.base.package_install(pkg, strict=self.base.conf.strict)
except Exception as e:
self.module.fail_json(
msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
def _is_module_installed(self, module_spec):
if self.with_modules:
module_spec = module_spec.strip()
module_list, nsv = self.module_base._get_modules(module_spec)
enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
if enabled_streams:
if nsv.stream:
if nsv.stream in enabled_streams:
return True # The provided stream was found
else:
return False # The provided stream was not found
else:
return True # No stream provided, but module found
return False # seems like a sane default
def ensure(self):
response = {
'msg': "",
'changed': False,
'results': [],
'rc': 0
}
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failure_response = {
'msg': "",
'failures': [],
'results': [],
'rc': 1
}
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run
if not self.names and self.autoremove:
self.names = []
self.state = 'absent'
if self.names == ['*'] and self.state == 'latest':
try:
self.base.upgrade_all()
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
self.module.fail_json(**failure_response)
else:
pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = self.base.comps.group_by_pattern(group_spec)
if group:
groups.append(group.id)
else:
environment = self.base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
self.module.fail_json(
msg="No group {0} available.".format(group_spec),
results=[],
)
if self.state in ['installed', 'present']:
# Install files.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Install modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if not self._is_module_installed(module):
response['results'].append("Module {0} installed.".format(module))
self.module_base.install([module])
self.module_base.enable([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
# Install groups.
for group in groups:
try:
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if module_specs and not self.with_modules:
# This means that the group or env wasn't found in comps
self.module.fail_json(
msg="No group {0} available.".format(module_specs[0]),
results=[],
)
# Install packages.
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Upgrade modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} upgraded.".format(module))
self.module_base.upgrade([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
try:
self.base.group_upgrade(group)
response['results'].append("Group {0} upgraded.".format(group))
except dnf.exceptions.CompsError:
if not self.update_only:
# If not already installed, try to install.
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
# Fail immediately, consistent with the group-upgrade and install branches above.
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
# Previously we forced base.conf.best=True here.
# However in 2.11+ there is a self.nobest option, so defer to that.
# Note, however, that just because nobest isn't set, doesn't mean that
# base.conf.best is actually true. We only force it false in
# _configure_base(), we never set it to true, and it can default to false.
# Thus, we still need to explicitly set it here.
self.base.conf.best = not self.nobest
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
if install_result['msg']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
else:
if install_result['msg']:
response['results'].append(install_result['msg'])
else:
# state == absent
if filenames:
self.module.fail_json(
msg="Cannot remove paths -- please specify package name.",
results=[],
)
# Remove modules
if module_specs and self.with_modules:
for module in module_specs:
try:
if self._is_module_installed(module):
response['results'].append("Module {0} removed.".format(module))
self.module_base.remove([module])
self.module_base.disable([module])
self.module_base.reset([module])
except dnf.exceptions.MarkingErrors as e:
failure_response['failures'].append(' '.join((module, to_native(e))))
for group in groups:
try:
self.base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
except AttributeError:
# Group either isn't installed or wasn't marked installed at install time
# because of DNF bug
#
# This is necessary until the upstream dnf API bug is fixed where installing
# a group via the dnf API doesn't actually mark the group as installed
# https://bugzilla.redhat.com/show_bug.cgi?id=1620324
pass
for environment in environments:
try:
self.base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
# short-circuit installed check for wildcard matching
if '*' in pkg_spec:
try:
self.base.remove(pkg_spec)
except dnf.exceptions.MarkingError as e:
is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
if is_failure:
failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
else:
response['results'].append(handled_remove_error)
continue
installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
sack=self.base.sack).installed().run()
for pkg in installed_pkg:
self.base.remove(str(pkg))
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
self.allowerasing = True
if self.autoremove:
self.base.autoremove()
try:
# NOTE for people who go down the rabbit hole of figuring out why
# resolve() throws DepsolveError here on dep conflict, but not when
# called from the CLI: It's controlled by conf.best. When best is
# set, Hawkey will fail the goal, and resolve() in dnf.base.Base
# will throw. Otherwise if it's not set, the update (install) will
# be (almost silently) removed from the goal, and Hawkey will report
# success. Note that in this case, similar to the CLI, skip_broken
# does nothing to help here, so we don't take it into account at
# all.
if not self.base.resolve(allow_erasing=self.allowerasing):
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Nothing to do"
self.module.exit_json(**response)
else:
response['changed'] = True
# If packages got installed/removed, add them to the results.
# We do this early so we can use it for both check_mode and not.
if self.download_only:
install_action = 'Downloaded'
else:
install_action = 'Installed'
for package in self.base.transaction.install_set:
response['results'].append("{0}: {1}".format(install_action, package))
for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
if self.module.check_mode:
response['msg'] = "Check mode: No changes made, but would have if not in check mode"
self.module.exit_json(**response)
try:
if self.download_only and self.download_dir and self.base.conf.destdir:
dnf.util.ensure_dir(self.base.conf.destdir)
self.base.repos.all().pkgdir = self.base.conf.destdir
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
self.module.fail_json(
msg="Failed to download packages: {0}".format(to_text(e)),
results=[],
)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
if not self.disable_gpg_check:
for package in self.base.transaction.install_set:
fail = False
gpgres, gpgerr = self.base._sig_check_pkg(package)
if gpgres == 0: # validated successfully
continue
elif gpgres == 1: # validation failed, install cert?
try:
self.base._get_key_for_package(package)
except dnf.exceptions.Error as e:
fail = True
else: # fatal error
fail = True
if fail:
msg = 'Failed to validate GPG signature for {0}: {1}'.format(package, gpgerr)
self.module.fail_json(msg)
if self.download_only:
# No further work left to do, and the results were already updated above.
# Just return them.
self.module.exit_json(**response)
else:
tid = self.base.do_transaction()
if tid is not None:
transaction = self.base.history.old([tid])[0]
if transaction.return_code:
failure_response['failures'].append(transaction.output())
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
self.module.exit_json(**response)
except dnf.exceptions.DepsolveError as e:
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
response['changed'] = False
response['results'].append("Package already installed: {0}".format(to_native(e)))
self.module.exit_json(**response)
else:
failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
def run(self):
"""The main function."""
# Check if autoremove is called correctly
if self.autoremove:
if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
self.module.fail_json(
msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
results=[],
)
# Check if download_dir is called correctly
if self.download_dir:
if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
self.module.fail_json(
msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
results=[],
)
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
# Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
self.list_items(self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not self.download_only and not dnf.util.am_i_root():
self.module.fail_json(
msg="This command has to be run under the root user.",
results=[],
)
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot, self.sslverify
)
if self.with_modules:
self.module_base = dnf.module.module_base.ModuleBase(self.base)
self.ensure()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
# Extend yumdnf_argument_spec with dnf-specific features that will never be
# backported to yum because yum is now in "maintenance mode" upstream
yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool')
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.fail_json(
msg="Failed to synchronize repodata: {0}".format(to_native(de)),
rc=1,
results=[],
changed=False
)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,423 |
Document Python version requirements for managed nodes
|
### Summary
Change 2fc73a9dc357e776dbbbfd035c86fe880415e60a suggests a documentation section "managed node requirements" but AFAICS, this does not exist.
I would be happy to construct such a section, if there is clarity on what the requirements are. I am presuming from context (_"and while Python 2 is not a concern anymore"_ from that change) that Python 2 was dropped as a supported platform on managed nodes at some point?
https://github.com/ansible/ansible/pull/77499#issuecomment-1095400484 suggests this can be backported to 2.11? So that would be the point Python 2 was dropped?
I'm presuming that as far back as 3.5 would still be supported for some time on the managed nodes? Is there any clarity around that?
### Issue Type
Bug Report
### Component Name
doc
### Ansible Version
```console
devel
```
### Configuration
```console
all
```
### OS / Environment
all
### Steps to Reproduce
read docs
### Expected Results
Managed node requirements shown
### Actual Results
```console
404 to https://docs.ansible.com/ansible/latest/dev_guide/:ref:managed-node-requirements
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
c.f. downstream issue https://review.opendev.org/c/zuul/zuul-jobs/+/851343/1
|
https://github.com/ansible/ansible/issues/78423
|
https://github.com/ansible/ansible/pull/78468
|
e0eb45e753e80e324e18c2ef79c1981e30c86ca2
|
1e6730aec4363744a390cd0357dbf9db3647a85c
| 2022-08-02T23:14:03Z |
python
| 2022-08-18T18:48:51Z |
docs/docsite/rst/dev_guide/developing_python_3.rst
|
.. _developing_python_3:
********************
Ansible and Python 3
********************
The ``ansible-core`` code runs on Python 3 (for specific versions, check :ref:`Control Node Requirements <control_node_requirements>`).
Contributors to ``ansible-core`` and to Ansible Collections should be aware of the tips in this document so that they can write code
that will run on the same versions of Python as the rest of Ansible.
.. contents::
:local:
We do have some considerations depending on the types of Ansible code:
1. controller-side code - code that runs on the machine where you invoke :command:`/usr/bin/ansible`; it only needs to support the controller's Python versions.
2. modules - the code which Ansible transmits to and invokes on the managed machine. Modules need to support the 'managed node' Python versions, with some exceptions.
3. shared ``module_utils`` code - the common code that is used by modules to perform tasks and sometimes used by controller-side code as well. Shared ``module_utils`` code needs to support the same range of Python as the modules.
However, the three types of code do not use the same string strategy. If you're developing a module or some ``module_utils`` code, be sure to read the section on string strategy carefully.
.. note::
- While modules can be written in any language, the above applies to code contributed to the core project, which only supports specific Python versions and Powershell for Windows.
Minimum version of Python 3.x and Python 2.x
============================================
See :ref:`Control Node Requirements <control_node_requirements>` and :ref:`Managed Node Requirements <managed_node_requirements>` for the
specific versions supported.
Your custom modules can support any version of Python (or other languages) you want, but the above are the requirements for the code contributed to the Ansible project.
Developing Ansible code that supports Python 2 and Python 3
===========================================================
The best place to start learning about writing code that supports both Python 2 and Python 3
is `Lennart Regebro's book: Porting to Python 3 <http://python3porting.com/>`_.
The book describes several strategies for porting to Python 3. The one we're
using is `to support Python 2 and Python 3 from a single code base
<http://python3porting.com/strategies.html#python-2-and-python-3-without-conversion>`_.
Understanding strings in Python 2 and Python 3
----------------------------------------------
Python 2 and Python 3 handle strings differently, so when you write code that supports Python 3
you must decide what string model to use. Strings can be an array of bytes (like in C) or
they can be an array of text. Text is what we think of as letters, digits,
numbers, other printable symbols, and a small number of unprintable "symbols"
(control codes).
In Python 2, the two types for these (:class:`str <python:str>` for bytes and
:func:`unicode <python:unicode>` for text) are often used interchangeably. When dealing only
with ASCII characters, the strings can be combined, compared, and converted
from one type to another automatically. When non-ASCII characters are
introduced, Python 2 starts throwing exceptions due to not knowing what encoding
the non-ASCII characters should be in.
Python 3 changes this behavior by making the separation between bytes (:class:`bytes <python3:bytes>`)
and text (:class:`str <python3:str>`) more strict. Python 3 will throw an exception when
trying to combine and compare the two types. The programmer has to explicitly
convert from one type to the other to mix values from each.
In Python 3 it's immediately apparent to the programmer when code is
mixing the byte and text types inappropriately, whereas in Python 2, code that mixes those types
may work until a user causes an exception by entering non-ASCII input.
Python 3 forces programmers to proactively define a strategy for
working with strings in their program so that they don't mix text and byte strings unintentionally.
Ansible uses different strategies for working with strings in controller-side code, in
:ref:`modules <module_string_strategy>`, and in :ref:`module_utils <module_utils_string_strategy>` code.
.. _controller_string_strategy:
Controller string strategy: the Unicode Sandwich
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Until recently ``ansible-core`` supported Python 2.x and followed this strategy, known as the Unicode Sandwich (named
after Python 2's :func:`unicode <python:unicode>` text type). For Unicode Sandwich we know that
at the border of our code and the outside world (for example, file and network IO,
environment variables, and some library calls) we are going to receive bytes.
We need to transform these bytes into text and use that throughout the
internal portions of our code. When we have to send those strings back out to
the outside world we first convert the text back into bytes.
To visualize this, imagine a 'sandwich' consisting of a top and bottom layer
of bytes, a layer of conversion between, and all text type in the center.
For compatibility reasons you will see a bunch of custom functions we developed (``to_text``/``to_bytes``/``to_native``)
and while Python 2 is not a concern anymore we will continue to use them as they apply for other cases that make
dealing with unicode problematic.
While most of it is no longer needed, the documentation below is still useful for those developing modules
that must support both Python 2 and 3 simultaneously.
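To make the sandwich concrete, here is a minimal sketch (illustrative, not taken from the Ansible code base) that uses those helpers: bytes are decoded at the input border, processed as text, and encoded again at the output border.

.. code-block:: python

   from ansible.module_utils.common.text.converters import to_bytes, to_text

   # Bottom layer of the sandwich: bytes come in from the outside world.
   with open('input.txt', 'rb') as f:
       b_data = f.read()

   # Middle layer: convert to text once and work only with text internally.
   data = to_text(b_data, errors='surrogate_or_strict')
   processed = data.upper()

   # Top layer: convert back to bytes just before leaving our code.
   with open('output.txt', 'wb') as f:
       f.write(to_bytes(processed, errors='surrogate_or_strict'))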
Unicode Sandwich common borders: places to convert bytes to text in controller code
-----------------------------------------------------------------------------------
This is a partial list of places where we have to convert to and from bytes
when using the Unicode Sandwich string strategy. It's not exhaustive but
it gives you an idea of where to watch for problems.
Reading and writing to files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In Python 2, reading from files yields bytes. In Python 3, it can yield text.
To make code that's portable to both we don't make use of Python 3's ability
to yield text but instead do the conversion explicitly ourselves. For example:
.. code-block:: python
from ansible.module_utils.common.text.converters import to_text
with open('filename-with-utf8-data.txt', 'rb') as my_file:
b_data = my_file.read()
try:
data = to_text(b_data, errors='surrogate_or_strict')
except UnicodeError:
# Handle the exception gracefully -- usually by displaying a good
# user-centric error message that can be traced back to this piece
# of code.
pass
.. note:: Much of Ansible assumes that all encoded text is UTF-8. At some
point, if there is demand for other encodings we may change that, but for
now it is safe to assume that bytes are UTF-8.
Writing to files is the opposite process:
.. code-block:: python
from ansible.module_utils.common.text.converters import to_bytes
with open('filename.txt', 'wb') as my_file:
my_file.write(to_bytes(some_text_string))
Note that we don't have to catch :exc:`UnicodeError` here because we're
transforming to UTF-8 and all text strings in Python can be transformed back
to UTF-8.
Filesystem interaction
^^^^^^^^^^^^^^^^^^^^^^
Dealing with filenames often involves dropping back to bytes because on UNIX-like
systems filenames are bytes. On Python 2, if we pass a text string to these
functions, the text string will be converted to a byte string inside of the
function and a traceback will occur if non-ASCII characters are present. In
Python 3, a traceback will only occur if the text string can't be decoded in
the current locale, but it's still good to be explicit and have code which
works on both versions:
.. code-block:: python
import os.path
from ansible.module_utils.common.text.converters import to_bytes
filename = u'/var/tmp/くらとみ.txt'
f = open(to_bytes(filename), 'wb')
mtime = os.path.getmtime(to_bytes(filename))
b_filename = os.path.expandvars(to_bytes(filename))
if os.path.exists(to_bytes(filename)):
pass
When you are only manipulating a filename as a string without talking to the
filesystem (or a C library which talks to the filesystem) you can often get
away without converting to bytes:
.. code-block:: python
import os.path
os.path.join(u'/var/tmp/café', u'くらとみ')
os.path.split(u'/var/tmp/café/くらとみ')
On the other hand, if the code needs to manipulate the filename and also talk
to the filesystem, it can be more convenient to transform to bytes right away
and manipulate in bytes.
.. warning:: Make sure all variables passed to a function are the same type.
If you're working with something like :func:`python3:os.path.join` which takes
multiple strings and uses them in combination, you need to make sure that
all the types are the same (either all bytes or all text). Mixing
bytes and text will cause tracebacks.
Interacting with other programs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Interacting with other programs goes through the operating system and
C libraries and operates on things that the UNIX kernel defines. These
interfaces are all byte-oriented so the Python interface is byte oriented as
well. On both Python 2 and Python 3, byte strings should be given to Python's
subprocess library and byte strings should be expected back from it.
One of the main places in Ansible's controller code that we interact with
other programs is the connection plugins' ``exec_command`` methods. These
methods transform any text strings they receive in the command (and arguments
to the command) to execute into bytes and return stdout and stderr as byte strings.
Higher level functions (like action plugins' ``_low_level_execute_command``)
transform the output into text strings.
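As a rough sketch of this byte-oriented border (not the actual connection plugin code), you hand ``subprocess`` byte strings and decode its output yourself:

.. code-block:: python

   import subprocess

   from ansible.module_utils.common.text.converters import to_bytes, to_text

   # The command crosses the process boundary, so transform it to bytes first.
   b_cmd = [to_bytes(c, errors='surrogate_or_strict') for c in (u'/bin/echo', u'café')]
   proc = subprocess.Popen(b_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   b_stdout, b_stderr = proc.communicate()

   # stdout and stderr come back as bytes; convert to text for internal use.
   stdout = to_text(b_stdout, errors='surrogate_or_strict')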
.. _module_string_strategy:
Module string strategy: Native String
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In modules we use a strategy known as Native Strings. This makes things
easier on the community members who maintain so many of Ansible's
modules: it avoids the backwards-compatibility break that mandating
all-text strings inside modules (with conversion between text and bytes
at the borders) would have caused.
Native strings refer to the type that Python uses when you specify a bare
string literal:
.. code-block:: python
"This is a native string"
In Python 2, these are byte strings. In Python 3 these are text strings. Modules should be
coded to expect bytes on Python 2 and text on Python 3.
.. _module_utils_string_strategy:
Module_utils string strategy: hybrid
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In ``module_utils`` code we use a hybrid string strategy. Although Ansible's
``module_utils`` code is largely like module code, some pieces of it are
used by the controller as well. So it needs to be compatible with modules
and with the controller's assumptions, particularly the string strategy.
The module_utils code attempts to accept native strings as input
to its functions and emit native strings as their output.
In ``module_utils`` code:
* Functions **must** accept string parameters as either text strings or byte strings.
* Functions may return either the same type of string as they were given or the native string type for the Python version they are run on.
* Functions that return strings **must** document whether they return strings of the same type as they were given or native strings.
Module-utils functions are therefore often very defensive in nature.
They convert their string parameters into text (using ``ansible.module_utils.common.text.converters.to_text``)
at the beginning of the function, do their work, and then convert
the return values into the native string type (using ``ansible.module_utils.common.text.converters.to_native``)
or back to the string type that their parameters received.
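A minimal sketch of this defensive pattern (the function name here is hypothetical, not an existing ``module_utils`` helper):

.. code-block:: python

   from ansible.module_utils.common.text.converters import to_text, to_native

   def normalize_hostname(name):
       # Accept either a text or a byte string; work with text internally.
       text_name = to_text(name, errors='surrogate_or_strict')
       stripped = text_name.strip().lower()
       # Return a native string, as this (hypothetical) helper documents.
       return to_native(stripped, errors='surrogate_or_strict')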
Tips, tricks, and idioms for Python 2/Python 3 compatibility
------------------------------------------------------------
Use forward-compatibility boilerplate
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Use the following boilerplate code at the top of all python files
to make certain constructs act the same way on Python 2 and Python 3:
.. code-block:: python
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
``__metaclass__ = type`` makes all classes defined in the file into new-style
classes without explicitly inheriting from :class:`object <python3:object>`.
The ``__future__`` imports do the following:
:absolute_import: Makes imports look in :data:`sys.path <python3:sys.path>` for the modules being
imported, skipping the directory in which the module doing the importing
lives. If the code wants to use the directory in which the module doing
the importing, there's a new dot notation to do so.
:division: Makes division of integers always return a float. If you need to
find the quotient use ``x // y`` instead of ``x / y``.
:print_function: Changes :func:`print <python3:print>` from a keyword into a function.
.. seealso::
* `PEP 0328: Absolute Imports <https://www.python.org/dev/peps/pep-0328/#guido-s-decision>`_
* `PEP 0238: Division <https://www.python.org/dev/peps/pep-0238>`_
* `PEP 3105: Print function <https://www.python.org/dev/peps/pep-3105>`_
Prefix byte strings with ``b_``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Since mixing text and bytes types leads to tracebacks we want to be clear
about what variables hold text and what variables hold bytes. We do this by
prefixing any variable holding bytes with ``b_``. For instance:
.. code-block:: python
filename = u'/var/tmp/café.txt'
b_filename = to_bytes(filename)
with open(b_filename) as f:
data = f.read()
We do not prefix the text strings, because we only operate
on byte strings at the borders, so there are fewer variables that need bytes
than text.
Import Ansible's bundled Python ``six`` library
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The third-party Python `six <https://pypi.org/project/six/>`_ library exists
to help projects create code that runs on both Python 2 and Python 3. Ansible
includes a version of the library in module_utils so that other modules can use it
without requiring that it is installed on the remote system. To make use of
it, import it like this:
.. code-block:: python
from ansible.module_utils import six
.. note:: Ansible can also use a system copy of six
Ansible will use a system copy of six if the system copy is a later
version than the one Ansible bundles.
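For example, a couple of commonly used ``six`` helpers (a small illustrative sketch):

.. code-block:: python

   from ansible.module_utils import six

   if six.PY3:
       # text_type is str on Python 3 and unicode on Python 2
       assert six.text_type is str

   # iteritems() works on both Python 2 and Python 3 dictionaries
   for key, value in six.iteritems({'a': 1}):
       print(key, value)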
Handle exceptions with ``as``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order for code to function on Python 2.6+ and Python 3, use the
new exception-catching syntax which uses the ``as`` keyword:
.. code-block:: python
try:
a = 2/0
except ValueError as e:
module.fail_json(msg="Tried to divide by zero: %s" % e)
Do **not** use the following syntax as it will fail on every version of Python 3:
.. This code block won't highlight because python2 isn't recognized. This is necessary to pass tests under python 3.
.. code-block:: none
try:
a = 2/0
except ValueError, e:
module.fail_json(msg="Tried to divide by zero: %s" % e)
Update octal numbers
^^^^^^^^^^^^^^^^^^^^
In Python 2.x, octal literals could be specified as ``0755``. In Python 3,
octals must be specified as ``0o755``.
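For example (a minimal sketch):

.. code-block:: python

   import os

   # Python 2-only syntax; a SyntaxError on Python 3:
   #     os.chmod('/tmp/example', 0755)

   # Works on Python 2.6+ and Python 3:
   os.chmod('/tmp/example', 0o755)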
String formatting for controller code
-------------------------------------
Use ``str.format()`` for Python 2.6 compatibility
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Starting in Python 2.6, strings gained a method called ``format()`` to put
strings together. However, one commonly used feature of ``format()`` wasn't
added until Python 2.7, so you need to remember not to use it in Ansible code:
.. code-block:: python
# Does not work in Python 2.6!
new_string = "Dear {}, Welcome to {}".format(username, location)
# Use this instead
new_string = "Dear {0}, Welcome to {1}".format(username, location)
Both of the format strings above map positional arguments of the ``format()``
method into the string. However, the first version doesn't work in
Python 2.6. Always remember to put numbers into the placeholders so the code
is compatible with Python 2.6.
.. seealso::
Python documentation on format strings:
- `format strings in 2.6 <https://docs.python.org/2.6/library/string.html#formatstrings>`_
- `format strings in 3.x <https://docs.python.org/3/library/string.html#formatstrings>`_
Use percent format with byte strings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In Python 3.x, byte strings do not have a ``format()`` method. However, they
do support the older percent-formatting.
.. code-block:: python
b_command_line = b'ansible-playbook --become-user %s -K %s' % (user, playbook_file)
.. note:: Percent formatting added in Python 3.5
Percent formatting of byte strings was added back into Python 3 in 3.5.
This isn't a problem for us because Python 3.5 is our minimum version.
However, if you happen to be testing Ansible code with Python 3.4 or
earlier, you will find that the byte string formatting here won't work.
Upgrade to Python 3.5 to test.
.. seealso::
Python documentation on `percent formatting <https://docs.python.org/3/library/stdtypes.html#string-formatting>`_
.. _testing_modules_python_3:
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,423 |
Document Python version requirements for managed nodes
|
### Summary
Change 2fc73a9dc357e776dbbbfd035c86fe880415e60a suggests a documentation section "managed node requirements" but AFAICS, this does not exist.
I would be happy to construct such a section, if there is clarity on what the requirements are. I am presuming from context (_"and while Python 2 is not a concern anymore"_ from that change) that Python 2 was dropped as a supported platform on managed nodes at some point?
https://github.com/ansible/ansible/pull/77499#issuecomment-1095400484 suggests this can be backported to 2.11? So that would be the point Python 2 was dropped?
I'm presuming that as far back as 3.5 would still be supported for some time on the managed nodes? Is there any clarity around that?
### Issue Type
Bug Report
### Component Name
doc
### Ansible Version
```console
devel
```
### Configuration
```console
all
```
### OS / Environment
all
### Steps to Reproduce
read docs
### Expected Results
Managed node requirements shown
### Actual Results
```console
404 to https://docs.ansible.com/ansible/latest/dev_guide/:ref:managed-node-requirements
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
c.f. downstream issue https://review.opendev.org/c/zuul/zuul-jobs/+/851343/1
|
https://github.com/ansible/ansible/issues/78423
|
https://github.com/ansible/ansible/pull/78468
|
e0eb45e753e80e324e18c2ef79c1981e30c86ca2
|
1e6730aec4363744a390cd0357dbf9db3647a85c
| 2022-08-02T23:14:03Z |
python
| 2022-08-18T18:48:51Z |
docs/docsite/rst/installation_guide/intro_installation.rst
|
.. _installation_guide:
.. _intro_installation_guide:
******************
Installing Ansible
******************
Ansible is an agentless automation tool that you install on a single host (referred to as the control node). From the control node, Ansible can manage an entire fleet of machines and other devices (referred to as managed nodes) remotely with SSH, Powershell remoting, and numerous other transports, all from a simple command-line interface with no databases or daemons required.
.. contents::
:local:
.. _control_node_requirements:
Control node requirements
=========================
For your control node (the machine that runs Ansible), you can use nearly any UNIX-like machine with Python 3.9 or newer installed. This includes Red Hat, Debian, Ubuntu, macOS, BSDs, and Windows under a `Windows Subsystem for Linux (WSL) distribution <https://docs.microsoft.com/en-us/windows/wsl/about>`_. Windows without WSL is not natively supported as a control node; see `Matt Davis' blog post <http://blog.rolpdog.com/2020/03/why-no-ansible-controller-for-windows.html>`_ for more information.
.. _getting_ansible:
.. _what_version:
Selecting an Ansible package and version to install
====================================================
Ansible's community packages are distributed in two ways: a minimalist language and runtime package called ``ansible-core``, and a much larger "batteries included" package called ``ansible``, which adds a community-curated selection of :ref:`Ansible Collections <collections>` for automating a wide variety of devices. Choose the package that fits your needs; the following instructions use ``ansible``, but you can substitute ``ansible-core`` if you prefer to start with a more minimal package and separately install only the Ansible Collections you require. The ``ansible`` or ``ansible-core`` packages may be available in your operating system's package manager, and you are free to install these packages with your preferred method. These installation instructions only cover the officially supported means of installing the Python package with ``pip``.
Installing and upgrading Ansible
================================
Locating Python
---------------
Locate and remember the path to the Python interpreter you wish to use to run Ansible. The following instructions refer to this Python as ``python3``. For example, if you've determined that you want the Python at ``/usr/bin/python3.9`` to be the one that you'll install Ansible under, specify that instead of ``python3``.
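If you are unsure which interpreter a given command resolves to, you can check as follows (the output shown is illustrative; yours will differ):

.. code-block:: console

   $ command -v python3
   /usr/bin/python3
   $ python3 -V
   Python 3.9.7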
Ensuring ``pip`` is available
-----------------------------
To verify whether ``pip`` is already installed for your preferred Python:
.. code-block:: console
$ python3 -m pip -V
If all is well, you should see something like the following:
.. code-block:: console
$ python3 -m pip -V
pip 21.0.1 from /usr/lib/python3.9/site-packages/pip (python 3.9)
If so, ``pip`` is available, and you can move on to the :ref:`next step <pip_install>`.
If you see an error like ``No module named pip``, you'll need to install ``pip`` under your chosen Python interpreter before proceeding. This may mean installing an additional OS package (for example, ``python3-pip``), or installing the latest ``pip`` directly from the Python Packaging Authority by running the following:
.. code-block:: console
$ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
$ python3 get-pip.py --user
You may need to perform some additional configuration before you are able to run Ansible. See the Python documentation on `installing to the user site`_ for more information.
.. _installing to the user site: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site
.. _pip_install:
Installing Ansible
------------------
Use ``pip`` in your selected Python environment to install the Ansible package of your choice for the current user:
.. code-block:: console
$ python3 -m pip install --user ansible
Alternately, you can install a specific version of ``ansible-core`` in this Python environment:
.. code-block:: console
$ python3 -m pip install --user ansible-core==2.12.3
.. _pip_upgrade:
Upgrading Ansible
-----------------
To upgrade an existing Ansible installation in this Python environment to the latest released version, simply add ``--upgrade`` to the command above:
.. code-block:: console
$ python3 -m pip install --upgrade --user ansible
Confirming your installation
----------------------------
You can test that Ansible is installed correctly by checking the version:
.. code-block:: console
$ ansible --version
The version displayed by this command is for the associated ``ansible-core`` package that has been installed.
To check the version of the ``ansible`` package that has been installed:
.. code-block:: console
$ python3 -m pip show ansible
.. _development_install:
Installing for development
==========================
If you are testing new features, fixing bugs, or otherwise working with the development team on changes to the core code, you can install and run the source from GitHub.
.. note::
You should only install and run the ``devel`` branch if you are modifying ``ansible-core`` or trying out features under development. This is a rapidly changing source of code and can become unstable at any point.
For more information on getting involved in the Ansible project, see the :ref:`ansible_community_guide`. For more information on creating Ansible modules and Collections, see the :ref:`developer_guide`.
.. _from_pip_devel:
Installing ``devel`` from GitHub with ``pip``
---------------------------------------------
You can install the ``devel`` branch of ``ansible-core`` directly from GitHub with ``pip``:
.. code-block:: console
$ python3 -m pip install --user https://github.com/ansible/ansible/archive/devel.tar.gz
You can replace ``devel`` in the URL mentioned above with any other branch or tag on GitHub to install older versions of Ansible, tagged alpha or beta versions, and release candidates.
.. _from_source:
Running the ``devel`` branch from a clone
-----------------------------------------
``ansible-core`` is easy to run from source. You do not need ``root`` permissions to use it and there is no software to actually install. No daemon or database setup is required.
#. Clone the ``ansible-core`` repository
.. code-block:: console
$ git clone https://github.com/ansible/ansible.git
$ cd ./ansible
#. Set up the Ansible environment
* Using Bash
.. code-block:: console
$ source ./hacking/env-setup
* Using Fish
.. code-block:: console
$ source ./hacking/env-setup.fish
* To suppress spurious warnings/errors, use ``-q``
.. code-block:: console
$ source ./hacking/env-setup -q
#. Install Python dependencies
.. code-block:: console
$ python3 -m pip install --user -r ./requirements.txt
#. Update the ``devel`` branch of ``ansible-core`` on your local machine
Use pull-with-rebase so any local changes are replayed.
.. code-block:: console
$ git pull --rebase
.. _shell_completion:
Adding Ansible command shell completion
=======================================
You can add shell completion of the Ansible command line utilities by installing an optional dependency called ``argcomplete``. ``argcomplete`` supports bash, and has limited support for zsh and tcsh.
For more information about installation and configuration, see the `argcomplete documentation <https://kislyuk.github.io/argcomplete/>`_.
Installing ``argcomplete``
--------------------------
.. code-block:: console
$ python3 -m pip install --user argcomplete
Configuring ``argcomplete``
---------------------------
There are 2 ways to configure ``argcomplete`` to allow shell completion of the Ansible command line utilities: globally or per command.
Global configuration
^^^^^^^^^^^^^^^^^^^^
Global completion requires bash 4.2.
.. code-block:: console
$ activate-global-python-argcomplete
This will write a bash completion file to a global location. Use ``--dest`` to change the location.
Per command configuration
^^^^^^^^^^^^^^^^^^^^^^^^^
If you do not have bash 4.2, you must register each script independently.
.. code-block:: console
$ eval $(register-python-argcomplete ansible)
$ eval $(register-python-argcomplete ansible-config)
$ eval $(register-python-argcomplete ansible-console)
$ eval $(register-python-argcomplete ansible-doc)
$ eval $(register-python-argcomplete ansible-galaxy)
$ eval $(register-python-argcomplete ansible-inventory)
$ eval $(register-python-argcomplete ansible-playbook)
$ eval $(register-python-argcomplete ansible-pull)
$ eval $(register-python-argcomplete ansible-vault)
You should place the above commands into your shell's profile file, such as ``~/.profile`` or ``~/.bash_profile``.
Using ``argcomplete`` with zsh or tcsh
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
See the `argcomplete documentation <https://kislyuk.github.io/argcomplete/>`_.
.. seealso::
:ref:`intro_adhoc`
Examples of basic commands
:ref:`working_with_playbooks`
Learning ansible's configuration management language
:ref:`installation_faqs`
Ansible Installation related to FAQs
`Mailing List <https://groups.google.com/group/ansible-project>`_
Questions? Help? Ideas? Stop by the list on Google Groups
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,516 |
some galaxy cli functions return non-zero on success
|
### Summary
Some functions (role search specifically) have an exit code of 1, even if the command was successful. Poking through the code, it looks as though in some of the cli functions they return "True" instead of an integer.
https://github.com/ansible/ansible/blob/devel/lib/ansible/cli/galaxy.py#L1713
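To illustrate the pattern (a hedged sketch; the method names are illustrative, not the exact source), a command handler that returns ``True`` produces exit status 1 if that boolean is passed through to ``sys.exit()``:

```python
import sys

def execute_search():
    # ... perform the search ...
    return True  # a bool, not an exit code

# sys.exit(True) is equivalent to sys.exit(1) -> non-zero status on success
sys.exit(execute_search())

# Returning an int instead gives the expected behavior:
def execute_search_fixed():
    # ... perform the search ...
    return 0
```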
### Issue Type
Bug Report
### Component Name
galaxy
### Ansible Version
```console
$ ansible --version
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under development. This is a rapidly changing source of code
and can become unstable at any point.
ansible [core 2.14.0.dev0] (devel 0a199a07ed) last updated 2022/08/11 11:04:53 (GMT -400)
config file = None
configured module search path = ['/home/jtanner/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/jtanner/workspace/github/jctanner.redhat/ansible/lib/ansible
ansible collection location = /home/jtanner/.ansible/collections:/usr/share/ansible/collections
executable location = /tmp/gng_testing/bin/ansible
python version = 3.10.5 (main, Jun 9 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)] (/tmp/gng_testing/bin/python)
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
any
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```shell
(gng_testing) [jtanner@p1 ~]$ ansible-galaxy -vvv role search --author=geerlingguy docker
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under development. This is a rapidly changing source of code
and can become unstable at any point.
Found 10 roles matching your search:
Name Description
---- -----------
geerlingguy.awx Installs and configures AWX (Ansible Tower's open source version).
geerlingguy.awx-container Ansible AWX container for Docker.
geerlingguy.containerd containerd.io for Linux.
geerlingguy.docker Docker for Linux.
geerlingguy.docker_arm Docker setup for Rasbperry Pi and ARM-based devices.
geerlingguy.ecr_container_build ECR docker image build and push management role.
geerlingguy.firewall Simple iptables firewall for most Unix-like systems.
geerlingguy.k8s_manifests Kubernetes manifest management role.
geerlingguy.kubernetes Kubernetes for Linux.
geerlingguy.pip Pip (Python package manager) for Linux.
(gng_testing) [jtanner@p1 ~]$ echo $?
1
```
### Expected Results
return code of zero.
### Actual Results
```console
return code of 1
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78516
|
https://github.com/ansible/ansible/pull/78578
|
1e6730aec4363744a390cd0357dbf9db3647a85c
|
4c8b8a06be5a851669b8e55f9a0e83b751db9b83
| 2022-08-11T15:19:57Z |
python
| 2022-08-18T19:15:51Z |
changelogs/fragments/78516-galaxy-cli-exit-codes.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,516 |
some galaxy cli functions return non-zero on success
|
### Summary
Some functions (role search specifically) have an exit code of 1, even if the command was successful. Poking through the code, it looks as though in some of the cli functions they return "True" instead of an integer.
https://github.com/ansible/ansible/blob/devel/lib/ansible/cli/galaxy.py#L1713
### Issue Type
Bug Report
### Component Name
galaxy
### Ansible Version
```console
$ ansible --version
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under development. This is a rapidly changing source of code
and can become unstable at any point.
ansible [core 2.14.0.dev0] (devel 0a199a07ed) last updated 2022/08/11 11:04:53 (GMT -400)
config file = None
configured module search path = ['/home/jtanner/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/jtanner/workspace/github/jctanner.redhat/ansible/lib/ansible
ansible collection location = /home/jtanner/.ansible/collections:/usr/share/ansible/collections
executable location = /tmp/gng_testing/bin/ansible
python version = 3.10.5 (main, Jun 9 2022, 00:00:00) [GCC 11.3.1 20220421 (Red Hat 11.3.1-2)] (/tmp/gng_testing/bin/python)
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
any
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```shell
(gng_testing) [jtanner@p1 ~]$ ansible-galaxy -vvv role search --author=geerlingguy docker
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out features under development. This is a rapidly changing source of code
and can become unstable at any point.
Found 10 roles matching your search:
Name Description
---- -----------
geerlingguy.awx Installs and configures AWX (Ansible Tower's open source version).
geerlingguy.awx-container Ansible AWX container for Docker.
geerlingguy.containerd containerd.io for Linux.
geerlingguy.docker Docker for Linux.
geerlingguy.docker_arm Docker setup for Rasbperry Pi and ARM-based devices.
geerlingguy.ecr_container_build ECR docker image build and push management role.
geerlingguy.firewall Simple iptables firewall for most Unix-like systems.
geerlingguy.k8s_manifests Kubernetes manifest management role.
geerlingguy.kubernetes Kubernetes for Linux.
geerlingguy.pip Pip (Python package manager) for Linux.
(gng_testing) [jtanner@p1 ~]$ echo $?
1
```
### Expected Results
return code of zero.
### Actual Results
```console
return code of 1
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78516
|
https://github.com/ansible/ansible/pull/78578
|
1e6730aec4363744a390cd0357dbf9db3647a85c
|
4c8b8a06be5a851669b8e55f9a0e83b751db9b83
| 2022-08-11T15:19:57Z |
python
| 2022-08-18T19:15:51Z |
lib/ansible/cli/galaxy.py
|
#!/usr/bin/env python
# Copyright: (c) 2013, James Cammarata <[email protected]>
# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import json
import os.path
import re
import shutil
import sys
import textwrap
import time
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
download_collections,
find_existing_collections,
install_collections,
publish_collection,
validate_collection_name,
validate_collection_path,
verify_collections,
SIGNATURE_COUNT_RE,
)
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.collection.gpg import GPG_ERROR_MAP
from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
# config definition by position: name, required, type
SERVER_DEF = [
('url', True, 'str'),
('username', False, 'str'),
('password', False, 'str'),
('token', False, 'str'),
('auth_url', False, 'str'),
('v3', False, 'bool'),
('validate_certs', False, 'bool'),
('client_id', False, 'str'),
('timeout', False, 'int'),
]
# config definition fields
SERVER_ADDITIONAL = {
'v3': {'default': 'False'},
'validate_certs': {'default': True, 'cli': [{'name': 'validate_certs'}]},
'timeout': {'default': '60', 'cli': [{'name': 'timeout'}]},
'token': {'default': None},
}
# override default if the generic is set
if C.GALAXY_IGNORE_CERTS is not None:
SERVER_ADDITIONAL['validate_certs'].update({'default': not C.GALAXY_IGNORE_CERTS})
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
This decorator constructs a ConcreteArtifactsManager and maintains
the related temporary directory auto-cleanup around the target
method invocation.
"""
def method_wrapper(*args, **kwargs):
if 'artifacts_manager' in kwargs:
return wrapped_method(*args, **kwargs)
artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['validate_certs']}
keyring = context.CLIARGS.get('keyring', None)
if keyring is not None:
artifacts_manager_kwargs.update({
'keyring': GalaxyCLI._resolve_path(keyring),
'required_signature_count': context.CLIARGS.get('required_valid_signature_count', None),
'ignore_signature_errors': context.CLIARGS.get('ignore_gpg_errors', None),
})
with ConcreteArtifactsManager.under_tmpdir(
C.DEFAULT_LOCAL_TMP,
**artifacts_manager_kwargs
) as concrete_artifact_cm:
kwargs['artifacts_manager'] = concrete_artifact_cm
return wrapped_method(*args, **kwargs)
return method_wrapper
def _display_header(path, h1, h2, w1=10, w2=7):
display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
path,
h1,
h2,
'-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
'-' * max([len(h2), w2]),
cwidth=w1,
vwidth=w2,
))
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
fqcn=to_text(collection.fqcn),
version=collection.ver,
cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
vwidth=max(vwidth, min_vwidth)
))
def _get_collection_widths(collections):
if not is_iterable(collections):
collections = (collections, )
fqcn_set = {to_text(c.fqcn) for c in collections}
version_set = {to_text(c.ver) for c in collections}
fqcn_length = len(max(fqcn_set, key=len))
version_length = len(max(version_set, key=len))
return fqcn_length, version_length
def validate_signature_count(value):
match = re.match(SIGNATURE_COUNT_RE, value)
if match is None:
raise ValueError(f"{value} is not a valid signature count value")
return value
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
name = 'ansible-galaxy'
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
self._raw_args = args
self._implicit_role = False
if len(args) > 1:
# Inject role into sys.argv[1] as a backwards compatibility step
if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
args.insert(1, 'role')
self._implicit_role = True
# since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
if args[1:3] == ['role', 'login']:
display.error(
"The login command was removed in late 2020. An API key is now required to publish roles or collections "
"to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
"ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
"command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
sys.exit(1)
self.api_servers = []
self.galaxy = None
self._api = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--token', '--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', help='Ignore SSL certificate validation errors.', default=None)
common.add_argument('--timeout', dest='timeout', type=int,
help="The time to wait for operations against the galaxy server, defaults to 60s.")
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
collections_path = opt_help.argparse.ArgumentParser(add_help=False)
collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
default=AnsibleCollectionConfig.collection_paths,
action=opt_help.PrependListAction,
help="One or more directories to search for collections in addition "
"to the default COLLECTIONS_PATHS. Separate multiple paths "
"with '{0}'.".format(os.path.pathsep))
cache_options = opt_help.argparse.ArgumentParser(add_help=False)
cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
default=False, help='Clear the existing server response cache.')
cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
help='Do not use the server response cache.')
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_download_options(collection_parser, parents=[common, cache_options])
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force, cache_options])
self.add_list_options(collection_parser, parents=[common, collections_path])
self.add_verify_options(collection_parser, parents=[common, collections_path])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_download_options(self, parser, parents=None):
download_parser = parser.add_parser('download', parents=parents,
help='Download collections and their dependencies as a tarball for an '
'offline install.')
download_parser.set_defaults(func=self.execute_download)
download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download collection(s) listed as dependencies.")
download_parser.add_argument('-p', '--download-path', dest='download_path',
default='./collections',
help='The directory to download the collections to.')
download_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be downloaded.')
download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
galaxy_type = 'role'
if parser.metavar == 'COLLECTION_ACTION':
galaxy_type = 'collection'
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
if galaxy_type == 'collection':
list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
help="Format to display the list of collections in.")
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
galaxy_type = 'collection'
verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
'found on the server and the installed copy. This does not verify dependencies.')
verify_parser.set_defaults(func=self.execute_verify)
verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
'This is mutually exclusive with --requirements-file.')
verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during verification and continue with the next specified collection.')
verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
help='Validate collection integrity locally without contacting server for '
'canonical manifest hash.')
verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be verified.')
verify_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
verify_parser.add_argument('--signature', dest='signatures', action='append',
help='An additional signature source to verify the authenticity of the MANIFEST.json before using '
'it to verify the rest of the contents of a collection from a Galaxy server. Use in '
'conjunction with a positional collection name (mutually exclusive with --requirements-file).')
valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
'or all to signify that all signatures must be used to verify the collection. ' \
'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
'Provide this option multiple times to ignore a list of status codes. ' \
'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type))
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
                                     'or all to signify that all signatures must be used to verify the collection. ' \
'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
'Provide this option multiple times to ignore a list of status codes. ' \
'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path',
default=self._get_default_collection_path(),
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
default=C.GALAXY_DISABLE_GPG_VERIFY,
help='Disable GPG signature verification when installing collections from a Galaxy server')
install_parser.add_argument('--signature', dest='signatures', action='append',
help='An additional signature source to verify the authenticity of the MANIFEST.json before '
'installing the collection from a Galaxy server. Use in conjunction with a positional '
'collection name (mutually exclusive with --requirements-file).')
install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
else:
install_parser.add_argument('-r', '--role-file', dest='requirements',
help='A file containing a list of roles to be installed.')
r_re = re.compile(r'^(?<!-)-[a-zA-Z]*r[a-zA-Z]*') # -r, -fr
contains_r = bool([a for a in self._raw_args if r_re.match(a)])
role_file_re = re.compile(r'--role-file($|=)') # --role-file foo, --role-file=foo
contains_role_file = bool([a for a in self._raw_args if role_file_re.match(a)])
if self._implicit_role and (contains_r or contains_role_file):
# Any collections in the requirements files will also be installed
install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
help='The keyring used during collection signature verification')
install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
default=C.GALAXY_DISABLE_GPG_VERIFY,
help='Disable GPG signature verification when installing collections from a Galaxy server')
install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
choices=list(GPG_ERROR_MAP.keys()))
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
help='The path in which the collection is built to. The default is the current '
'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
# ensure we have 'usable' cli option
setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
def server_config_def(section, key, required, option_type):
config_def = {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
'type': option_type,
}
if key in SERVER_ADDITIONAL:
config_def.update(SERVER_ADDITIONAL[key])
return config_def
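        # Illustration only (assumed server name 'release'): server_config_def('release', 'url', True, 'str')
        # returns roughly
        #   {'description': 'The url of the release Galaxy server',
        #    'ini': [{'section': 'galaxy_server.release', 'key': 'url'}],
        #    'env': [{'name': 'ANSIBLE_GALAXY_SERVER_RELEASE_URL'}],
        #    'required': True, 'type': 'str'}
        # before any SERVER_ADDITIONAL overrides for the key are applied.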
galaxy_options = {}
for optional_key in ['clear_response_cache', 'no_cache', 'timeout']:
if optional_key in context.CLIARGS:
galaxy_options[optional_key] = context.CLIARGS[optional_key]
config_servers = []
        # Need to filter out empty strings or other non-truthy values, as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
for server_priority, server_key in enumerate(server_list, start=1):
# Abuse the 'plugin config' by making 'galaxy_server' a type of plugin
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
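            # Sketch of the matching ansible.cfg section for an assumed server named 'release':
            #   [galaxy_server.release]
            #   url = https://galaxy.ansible.com/api/
            #   token = <your API token>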
config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF)
defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
# resolve the config created options above with existing config and user options
server_options = C.config.get_plugin_options('galaxy_server', server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here
auth_url = server_options.pop('auth_url')
client_id = server_options.pop('client_id')
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
v3 = server_options.pop('v3')
validate_certs = server_options['validate_certs']
if v3:
# This allows a user to explicitly indicate the server uses the /v3 API
# This was added for testing against pulp_ansible and I'm not sure it has
# a practical purpose outside of this use case. As such, this option is not
# documented as of now
server_options['available_api_versions'] = {'v3': '/v3'}
# default case if no auth info is provided.
server_options['token'] = None
if username:
server_options['token'] = BasicAuthToken(username, server_options['password'])
else:
if token_val:
if auth_url:
server_options['token'] = KeycloakToken(access_token=token_val,
auth_url=auth_url,
validate_certs=validate_certs,
client_id=client_id)
else:
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
server_options.update(galaxy_options)
config_servers.append(GalaxyAPI(
self.galaxy, server_key,
priority=server_priority,
**server_options
))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
# resolve validate_certs
v_config_default = True if C.GALAXY_IGNORE_CERTS is None else not C.GALAXY_IGNORE_CERTS
validate_certs = v_config_default if context.CLIARGS['validate_certs'] is None else context.CLIARGS['validate_certs']
if cmd_server:
            # Cmd args take precedence over the config entry, but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(
self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
priority=len(config_servers) + 1,
validate_certs=validate_certs,
**galaxy_options
))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(
self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
priority=0,
validate_certs=validate_certs,
**galaxy_options
))
return context.CLIARGS['func']()
@property
def api(self):
if self._api:
return self._api
for server in self.api_servers:
try:
if u'v1' in server.available_api_versions:
self._api = server
break
except Exception:
continue
if not self._api:
self._api = self.api_servers[0]
return self._api
def _get_default_collection_path(self):
return C.COLLECTIONS_PATHS[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None, validate_signature_options=True):
"""
        Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are 2
        requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
          name: Downloads the role to the specified name, defaults to the Galaxy name or the name of the repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
          version: The version of the role to download. Can also be a tag, commit, or branch name and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
type: git|file|url|galaxy
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
:param artifacts_manager: Artifacts manager.
        :return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if file_requirements is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, self.api, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, self.api, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
else:
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles') or []:
requirements['roles'] += parse_role_req(role_req)
requirements['collections'] = [
Requirement.from_requirement_dict(
self._init_coll_req_dict(collection_req),
artifacts_manager,
validate_signature_options,
)
for collection_req in file_requirements.get('collections') or []
]
return requirements
def _init_coll_req_dict(self, coll_req):
if not isinstance(coll_req, dict):
# Assume it's a string:
return {'name': coll_req}
if (
'name' not in coll_req or
not coll_req.get('source') or
coll_req.get('type', 'galaxy') != 'galaxy'
):
return coll_req
# Try and match up the requirement source with our list of Galaxy API
# servers defined in the config, otherwise create a server with that
# URL without any auth.
coll_req['source'] = next(
iter(
srvr for srvr in self.api_servers
if coll_req['source'] in {srvr.name, srvr.api_server}
),
GalaxyAPI(
self.galaxy,
'explicit_requirement_{name!s}'.format(
name=coll_req['name'],
),
coll_req['source'],
validate_certs=not context.CLIARGS['ignore_certs'],
),
)
return coll_req
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
# Get the top-level 'description' first, falling back to galaxy_info['galaxy_info']['description'].
galaxy_info = role_info.get('galaxy_info', {})
description = role_info.get('description', galaxy_info.get('description', ''))
text.append(u"\tdescription: %s" % description)
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
# make sure we have a trailing newline returned
text.append(u"")
return u'\n'.join(text)
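    # Illustrative output of _display_role_info for assumed role data (fields vary per role):
    #   Role: geerlingguy.apache
    #       description: Apache 2.x for Linux.
    #       install_date: Mon Jan 01 00:00:00 2024
    #       installed_version: 3.1.0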
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
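        # Illustrative input/output for comment_ify (not executed):
        #   comment_ify("See L(the docs, https://docs.ansible.com) and C(name)")
        #   -> "# See the docs <https://docs.ansible.com> and 'name'"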
loader = DataLoader()
templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
templar.environment.filters['comment_ify'] = comment_ify
meta_value = templar.template(meta_template)
return meta_value
def _require_one_of_collections_requirements(
self, collections, requirements_file,
signatures=None,
artifacts_manager=None,
):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
if signatures is not None:
raise AnsibleError(
"The --signatures option and --requirements-file are mutually exclusive. "
"Use the --signatures with positional collection_name args or provide a "
"'signatures' key for requirements in the --requirements-file."
)
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(
requirements_file,
allow_old_format=False,
artifacts_manager=artifacts_manager,
)
else:
requirements = {
'collections': [
Requirement.from_string(coll_input, artifacts_manager, signatures)
for coll_input in collections
],
'roles': [],
}
return requirements
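        # Sketch of the returned mapping:
        #   {'collections': [Requirement, ...], 'roles': []}      # positional collection args
        #   {'collections': [...], 'roles': [GalaxyRole, ...]}    # --requirements-file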
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(
to_text(collection_path, errors='surrogate_or_strict'),
to_text(output_path, errors='surrogate_or_strict'),
force,
)
@with_collection_artifacts_manager
def execute_download(self, artifacts_manager=None):
collections = context.CLIARGS['args']
no_deps = context.CLIARGS['no_deps']
download_path = context.CLIARGS['download_path']
requirements_file = context.CLIARGS['requirements']
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
artifacts_manager=artifacts_manager,
)['collections']
download_path = GalaxyCLI._resolve_path(download_path)
b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
if not os.path.exists(b_download_path):
os.makedirs(b_download_path)
download_collections(
requirements, download_path, self.api_servers, no_deps,
context.CLIARGS['allow_pre_release'],
artifacts_manager=artifacts_manager,
)
return 0
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
dependencies=[],
))
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <[email protected]>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
build_ignore=[],
))
skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
# delete the contents rather than the collection root in case init was run from the root (--init-path ../../)
for root, dirs, files in os.walk(b_obj_path, topdown=True):
for old_dir in dirs:
path = os.path.join(root, old_dir)
shutil.rmtree(path)
for old_file in files:
path = os.path.join(root, old_file)
os.unlink(path)
if obj_skeleton is not None:
own_skeleton = False
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
loader = DataLoader()
templar = Templar(loader, variables=inject_data)
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
# Filter out ignored directory names
# Use [:] to mutate the list os.walk uses
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
with open(dest_file, 'wb') as df:
df.write(b_rendered)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
        Prints out detailed information about an installed role as well as info available from the Galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.api, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
if not context.CLIARGS['offline']:
remote_data = None
try:
remote_data = self.api.lookup_role_by_name(role, False)
except AnsibleError as e:
if e.http_code == 400 and 'Bad Request' in e.message:
# Role does not exist in Ansible Galaxy
data = u"- the role %s was not found" % role
break
raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
if remote_data:
role_info.update(remote_data)
elif context.CLIARGS['offline'] and not gr._exists:
data = u"- the role %s was not found" % role
break
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
collections = context.CLIARGS['args']
search_paths = context.CLIARGS['collections_path']
ignore_errors = context.CLIARGS['ignore_errors']
local_verify_only = context.CLIARGS['offline']
requirements_file = context.CLIARGS['requirements']
signatures = context.CLIARGS['signatures']
if signatures is not None:
signatures = list(signatures)
requirements = self._require_one_of_collections_requirements(
collections, requirements_file,
signatures=signatures,
artifacts_manager=artifacts_manager,
)['collections']
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
results = verify_collections(
requirements, resolved_paths,
self.api_servers, ignore_errors,
local_verify_only=local_verify_only,
artifacts_manager=artifacts_manager,
)
if any(result for result in results if not result.success):
return 1
return 0
@with_collection_artifacts_manager
def execute_install(self, artifacts_manager=None):
"""
        Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
        You can pass in a list (roles or collections) or use the file
        option listed below (these are mutually exclusive). If you pass in a list, it
        can be a name (which will be downloaded via the Galaxy API and GitHub), or it can be a local tar archive file.
:param artifacts_manager: Artifacts manager.
"""
install_items = context.CLIARGS['args']
requirements_file = context.CLIARGS['requirements']
collection_path = None
signatures = context.CLIARGS.get('signatures')
if signatures is not None:
signatures = list(signatures)
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
"run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
"'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
# TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
collection_requirements = []
role_requirements = []
if context.CLIARGS['type'] == 'collection':
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
requirements = self._require_one_of_collections_requirements(
install_items, requirements_file,
signatures=signatures,
artifacts_manager=artifacts_manager,
)
collection_requirements = requirements['collections']
if requirements['roles']:
display.vvv(two_type_warning.format('role'))
else:
if not install_items and requirements_file is None:
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
if requirements_file:
if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
galaxy_args = self._raw_args
will_install_collections = self._implicit_role and '-p' not in galaxy_args and '--roles-path' not in galaxy_args
requirements = self._parse_requirements_file(
requirements_file,
artifacts_manager=artifacts_manager,
validate_signature_options=will_install_collections,
)
role_requirements = requirements['roles']
# We can only install collections and roles at the same time if the type wasn't specified and the -p
# argument was not used. If collections are present in the requirements then at least display a msg.
if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
'--roles-path' in galaxy_args):
                    # We only want to display a warning for 'ansible-galaxy install -r ... -p ...'. In other cases the user
# was explicit about the type and shouldn't care that collections were skipped.
display_func = display.warning if self._implicit_role else display.vvv
display_func(two_type_warning.format('collection'))
else:
collection_path = self._get_default_collection_path()
collection_requirements = requirements['collections']
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
role_requirements.append(GalaxyRole(self.galaxy, self.api, **role))
if not role_requirements and not collection_requirements:
display.display("Skipping install, no requirements found")
return
if role_requirements:
display.display("Starting galaxy role install process")
self._execute_install_role(role_requirements)
if collection_requirements:
display.display("Starting galaxy collection install process")
# Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
# the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
self._execute_install_collection(
collection_requirements, collection_path,
artifacts_manager=artifacts_manager,
)
def _execute_install_collection(
self, requirements, path, artifacts_manager,
):
force = context.CLIARGS['force']
ignore_errors = context.CLIARGS['ignore_errors']
no_deps = context.CLIARGS['no_deps']
force_with_deps = context.CLIARGS['force_with_deps']
try:
disable_gpg_verify = context.CLIARGS['disable_gpg_verify']
except KeyError:
if self._implicit_role:
raise AnsibleError(
'Unable to properly parse command line arguments. Please use "ansible-galaxy collection install" '
'instead of "ansible-galaxy install".'
)
raise
# If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
upgrade = context.CLIARGS.get('upgrade', False)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection will not be picked up in an Ansible "
"run, unless within a playbook-adjacent collections directory." % (to_text(path), to_text(":".join(collections_path))))
output_path = validate_collection_path(path)
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(
requirements, output_path, self.api_servers, ignore_errors,
no_deps, force, force_with_deps, upgrade,
allow_pre_release=allow_pre_release,
artifacts_manager=artifacts_manager,
disable_gpg_verify=disable_gpg_verify,
)
return 0
def _execute_install_role(self, requirements):
role_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
for role in requirements:
            # only process roles from role files whose names match the given args, if any were given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
# NOTE: the meta file is also required for installing the role, not just dependencies
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata_dependencies + role.requirements
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in requirements:
display.display('- adding dependency: %s' % to_text(dep_role))
requirements.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
requirements.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
requirements.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
        Removes the roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
List installed collections or roles
"""
if context.CLIARGS['type'] == 'role':
self.execute_list_role()
elif context.CLIARGS['type'] == 'collection':
self.execute_list_collection()
def execute_list_role(self):
"""
List all roles installed on the local system or a specific role
"""
path_found = False
role_found = False
warnings = []
roles_search_paths = context.CLIARGS['roles_path']
role_name = context.CLIARGS['role']
for path in roles_search_paths:
role_path = GalaxyCLI._resolve_path(path)
if os.path.isdir(path):
path_found = True
else:
warnings.append("- the configured path {0} does not exist.".format(path))
continue
if role_name:
# show the requested role, if it exists
gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
if os.path.isdir(gr.path):
role_found = True
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
break
warnings.append("- the role %s was not found" % role_name)
else:
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
if not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
# Do not warn if the role was found in any of the search paths
if role_found and role_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
return 0
@with_collection_artifacts_manager
def execute_list_collection(self, artifacts_manager=None):
"""
List all collections installed on the local system
:param artifacts_manager: Artifacts manager.
"""
if artifacts_manager is not None:
artifacts_manager.require_build_metadata = False
output_format = context.CLIARGS['output_format']
collections_search_paths = set(context.CLIARGS['collections_path'])
collection_name = context.CLIARGS['collection']
default_collections_path = AnsibleCollectionConfig.collection_paths
collections_in_paths = {}
warnings = []
path_found = False
collection_found = False
for path in collections_search_paths:
collection_path = GalaxyCLI._resolve_path(path)
if not os.path.exists(path):
if path in default_collections_path:
# don't warn for missing default paths
continue
warnings.append("- the configured path {0} does not exist.".format(collection_path))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
path_found = True
if collection_name:
# list a specific collection
validate_collection_name(collection_name)
namespace, collection = collection_name.split('.')
collection_path = validate_collection_path(collection_path)
b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
if not os.path.exists(b_collection_path):
warnings.append("- unable to find {0} in collection paths".format(collection_name))
continue
if not os.path.isdir(collection_path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
continue
collection_found = True
try:
collection = Requirement.from_dir_path_as_unknown(
b_collection_path,
artifacts_manager,
)
except ValueError as val_err:
six.raise_from(AnsibleError(val_err), val_err)
if output_format in {'yaml', 'json'}:
collections_in_paths[collection_path] = {
collection.fqcn: {'version': collection.ver}
}
continue
fqcn_width, version_width = _get_collection_widths([collection])
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
_display_collection(collection, fqcn_width, version_width)
else:
# list all collections
collection_path = validate_collection_path(path)
if os.path.isdir(collection_path):
display.vvv("Searching {0} for collections".format(collection_path))
collections = list(find_existing_collections(
collection_path, artifacts_manager,
))
else:
# There was no 'ansible_collections/' directory in the path, so there
                    # are no collections here.
display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
continue
if not collections:
display.vvv("No collections found at {0}".format(collection_path))
continue
if output_format in {'yaml', 'json'}:
collections_in_paths[collection_path] = {
collection.fqcn: {'version': collection.ver} for collection in collections
}
continue
# Display header
fqcn_width, version_width = _get_collection_widths(collections)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
# Sort collections by the namespace and name
for collection in sorted(collections, key=to_text):
_display_collection(collection, fqcn_width, version_width)
# Do not warn if the specific collection was found in any of the search paths
if collection_found and collection_name:
warnings = []
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
if output_format == 'json':
display.display(json.dumps(collections_in_paths))
elif output_format == 'yaml':
display.display(yaml_dump(collections_in_paths))
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
        ''' Searches for roles on the Ansible Galaxy server '''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
def main(args=None):
GalaxyCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,588 |
"The module xxxx/library/my_test.py was not found in configured module paths"
|
### Summary
Hi there,
Not too sure what's going on, as everything on my end looks fine. I tried following this tutorial: [https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html](url) and it led to the following error message:
```yml
[WARNING]: No inventory was parsed, only implicit localhost is available
localhost | FAILED! => {
"msg": "The module xxxxx/library/my_test.py was not found in configured module paths"
}
```
Okay? Maybe I have to set something up differently in my config file.
I then went ahead and set my ansible.cfg file to the following:
```yml
[defaults]
library = ./library
```
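For reference, my understanding is that `-m` takes a module *name* that Ansible resolves against the configured library path (not a path to the module file), so presumably the call should look like this:
```console
$ ansible -m my_test -a 'name=hello new=true' localhost
```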
I tried looking for this issue elsewhere, but it doesn't seem to have been reported much.
Would appreciate some guidance or help.
Thanks.
### Issue Type
Bug Report
### Component Name
Developing Modules
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.3]
config file = /Users/xxxx/Desktop/Projects/project-destination/ansible.cfg
configured module search path = ['/Users/xxxx/Desktop/Projects/project-destination/library']
ansible python module location = /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/ansible
ansible collection location = /Users/xxxx/.ansible/collections:/usr/share/ansible/collections
executable location = /Library/Frameworks/Python.framework/Versions/3.10/bin/ansible
python version = 3.10.2 (v3.10.2:a58ebcc701, Jan 13 2022, 14:50:16) [Clang 13.0.0 (clang-1300.0.29.30)]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
DEFAULT_MODULE_PATH(/Users/xxxx/Desktop/Projects/project-destination/ansible.cfg) = ['/Users/xxxx/Desktop/Projects/project-destination/library']
BECOME:
======
CACHE:
=====
CALLBACK:
========
CLICONF:
=======
CONNECTION:
==========
HTTPAPI:
=======
INVENTORY:
=========
LOOKUP:
======
NETCONF:
=======
SHELL:
=====
VARS:
====
```
### OS / Environment
Mac OS
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
ansible -m xxxx/library/my_test.py -a 'name=hello new=true' localhost
```
### Expected Results
Expecting back this output:
```yml
{"changed": true, "state": {"original_message": "hello", "new_message": "goodbye"}, "invocation": {"module_args": {"name": "hello", "new": true}}}
```
### Actual Results
```console
[WARNING]: No inventory was parsed, only implicit localhost is available
localhost | FAILED! => {
"msg": "The module xxxxx/library/my_test.py was not found in configured module paths"
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78588
|
https://github.com/ansible/ansible/pull/78593
|
beb70daf14bc14acad076b34dca39a3ca6b633bb
|
5ac40b358d7d64e4169813e400c19f869b17183c
| 2022-08-18T23:39:45Z |
python
| 2022-08-19T19:06:03Z |
docs/docsite/rst/dev_guide/developing_modules_general.rst
|
.. _developing_modules_general:
.. _module_dev_tutorial_sample:
******************
Developing modules
******************
A module is a reusable, standalone script that Ansible runs on your behalf, either locally or remotely. Modules interact with your local machine, an API, or a remote system to perform specific tasks like changing a database password or spinning up a cloud instance. Each module can be used by the Ansible API, or by the :command:`ansible` or :command:`ansible-playbook` programs. A module provides a defined interface, accepts arguments, and returns information to Ansible by printing a JSON string to stdout before exiting.
If you need functionality that is not available in any of the thousands of Ansible modules found in collections, you can easily write your own custom module. When you write a module for local use, you can choose any programming language and follow your own rules. Use this topic to learn how to create an Ansible module in Python. After you create a module, you must add it locally to the appropriate directory so that Ansible can find and execute it. For details about adding a module locally, see :ref:`developing_locally`.
If you are developing a module in a :ref:`collection <developing_collections>`, see those documents instead.
.. contents::
:local:
.. _environment_setup:
Preparing an environment for developing Ansible modules
=======================================================
You just need ``ansible-core`` installed to test the module. Modules can be written in any language,
but most of the following guide assumes you are using Python.
Modules for inclusion in Ansible itself must be Python or Powershell.
One advantage of using Python or Powershell for your custom modules is being able to use the ``module_utils`` common code that does a lot of the
heavy lifting for argument processing, logging and response writing, among other things.
Creating a module
=================
It is highly recommended that you use a ``venv`` or ``virtualenv`` for Python development.
To create a module:
1. Create a ``library`` directory in your workspace; your test play should live in the same directory.
2. Create your new module file: ``$ touch library/my_test.py``. Or just open/create it with your editor of choice.
3. Paste the content below into your new module file. It includes the :ref:`required Ansible format and documentation <developing_modules_documenting>`, a simple :ref:`argument spec for declaring the module options <argument_spec>`, and some example code.
4. Modify and extend the code to do what you want your new module to do. See the :ref:`programming tips <developing_modules_best_practices>` and :ref:`Python 3 compatibility <developing_python_3>` pages for pointers on writing clean and concise module code.
.. literalinclude:: ../../../../examples/scripts/my_test.py
:language: python
Creating an info or a facts module
==================================
Ansible gathers information about the target machines using facts modules, and gathers information on other objects or files using info modules.
If you find yourself trying to add ``state: info`` or ``state: list`` to an existing module, that is often a sign that a new dedicated ``_facts`` or ``_info`` module is needed.
In Ansible 2.8 and onwards, we have two types of information modules: ``*_info`` and ``*_facts``.
If a module is named ``<something>_facts``, it should be because its main purpose is returning ``ansible_facts``. Do not name modules that do not do this with ``_facts``.
Only use ``ansible_facts`` for information that is specific to the host machine, for example network interfaces and their configuration, which operating system and which programs are installed.
Modules that query/return general information (and not ``ansible_facts``) should be named ``_info``.
General information is non-host specific information, for example information on online/cloud services (you can access different accounts for the same online service from the same host), or information on VMs and containers accessible from the machine, or information on individual files or programs.
Info and facts modules are just like any other Ansible module, with a few minor requirements:
1. They MUST be named ``<something>_info`` or ``<something>_facts``, where <something> is singular.
2. Info ``*_info`` modules MUST return in the form of the :ref:`result dictionary<common_return_values>` so other modules can access them.
3. Fact ``*_facts`` modules MUST return in the ``ansible_facts`` field of the :ref:`result dictionary<common_return_values>` so other modules can access them.
4. They MUST support :ref:`check_mode <check_mode_dry>`.
5. They MUST NOT make any changes to the system.
6. They MUST document the :ref:`return fields<return_block>` and :ref:`examples<examples_block>`.
The rest is just like creating a normal module.
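As a minimal sketch of the difference (the module names and fields below are hypothetical, purely for illustration), an ``*_info`` module returns its data in the plain result dictionary, while a ``*_facts`` module nests host-specific data under ``ansible_facts``:
.. code-block:: python
# my_service_info: general, non-host-specific information in the result dictionary
module.exit_json(changed=False, services=[{"name": "web", "state": "running"}])
# my_hardware_facts: host-specific information, returned under ansible_facts
module.exit_json(changed=False, ansible_facts={"my_hardware_nics": ["eth0", "eth1"]})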
Verifying your module code
==========================
After you modify the sample code above to do what you want, you can try out your module.
Our :ref:`debugging tips <debugging_modules>` will help if you run into bugs as you verify your module code.
Verifying your module code locally
----------------------------------
The simplest way is to use the ``ansible`` ad hoc command. Note that ``-m`` takes a module *name*, which is resolved against the configured library path, not a path to the module file:
.. code:: shell
ANSIBLE_LIBRARY=./library ansible -m my_test -a 'name=hello new=true' remotehost
If your module does not need to target a remote host, you can quickly and easily exercise your code locally like this:
.. code:: shell
ANSIBLE_LIBRARY=./library ansible -m my_test -a 'name=hello new=true' localhost
- If for any reason (pdb, using print(), faster iteration, etc) you want to avoid going through Ansible,
another way is to create an arguments file, a basic JSON config file that passes parameters to your module so that you can run it.
Name the arguments file ``/tmp/args.json`` and add the following content:
.. code:: json
{
"ANSIBLE_MODULE_ARGS": {
"name": "hello",
"new": true
}
}
- Then the module can be tested locally and directly. This skips the packing steps and uses module_utils files directly:
.. code:: console
$ python library/my_test.py /tmp/args.json
It should return output like this:
.. code:: json
{"changed": true, "state": {"original_message": "hello", "new_message": "goodbye"}, "invocation": {"module_args": {"name": "hello", "new": true}}}
Verifying your module code in a playbook
----------------------------------------
You can easily run a full test by including it in a playbook, as long as the ``library`` directory is in the same directory as the play:
- Create a playbook in any directory: ``$ touch testmod.yml``
- Add the following to the new playbook file:
.. code-block:: yaml
- name: test my new module
hosts: localhost
tasks:
- name: run the new module
my_test:
name: 'hello'
new: true
register: testout
- name: dump test output
debug:
msg: '{{ testout }}'
- Run the playbook and analyze the output: ``$ ansible-playbook ./testmod.yml``
Testing your newly-created module
=================================
The following example will get you started with testing your module code. Please review our :ref:`testing <developing_testing>` section for more detailed
information, including instructions for :ref:`testing module documentation <testing_module_documentation>`, adding :ref:`integration tests <testing_integration>`, and more.
.. note::
If contributing to Ansible, every new module and plugin should have integration tests, even if the tests cannot be run on Ansible CI infrastructure.
In this case, the tests should be marked with the ``unsupported`` alias in `aliases file <https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/integration-aliases.html>`_.
Performing sanity tests
-----------------------
You can run through Ansible's sanity checks in a container:
``$ ansible-test sanity -v --docker --python 3.10 MODULE_NAME``
.. note::
Note that this example requires Docker to be installed and running. If you'd rather not use a container for this, you can choose to use ``--venv`` instead of ``--docker``.
Contributing back to Ansible
============================
If you would like to contribute to ``ansible-core`` by adding a new feature or fixing a bug, `create a fork <https://help.github.com/articles/fork-a-repo/>`_ of the ansible/ansible repository and develop against a new feature branch using the ``devel`` branch as a starting point. When you have a good working code change, you can submit a pull request to the Ansible repository by selecting your feature branch as a source and the Ansible devel branch as a target.
If you want to contribute a module to an :ref:`Ansible collection <contributing_maintained_collections>`, review our :ref:`submission checklist <developing_modules_checklist>`, :ref:`programming tips <developing_modules_best_practices>`, and :ref:`strategy for maintaining Python 2 and Python 3 compatibility <developing_python_3>`, as well as information about :ref:`testing <developing_testing>` before you open a pull request.
The :ref:`Community Guide <ansible_community_guide>` covers how to open a pull request and what happens next.
Communication and development support
=====================================
Join the ``#ansible-devel`` chat channel (using Matrix at ansible.im or using IRC at `irc.libera.chat <https://libera.chat/>`_) for discussions surrounding Ansible development.
For questions and discussions pertaining to using the Ansible product, join the ``#ansible`` channel.
To find other topic-specific chat channels, look at :ref:`Community Guide, Communicating <communication_irc>`.
Credit
======
Thank you to Thomas Stringer (`@trstringer <https://github.com/trstringer>`_) for contributing source
material for this topic.
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,133 |
apt_repository module requires gpg to be installed target, but docs don't mention
|
### Summary
The `apt_repository` module will fail with a somewhat confusing error if gnupg and dirmngr are not installed. Neither of these packages is marked Required/Essential/Important, so they may not already be installed on minimal systems where you might want to run `apt_repository`.
The fix might be to just include `gnupg` in the requirements section of the docs.
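In the meantime, a play can install the helper packages before the first `apt_repository` task, as a workaround (a sketch; package names as currently shipped by Debian/Ubuntu):
```yml
- name: Ensure key-handling tools are present
  ansible.builtin.apt:
    name:
      - gnupg
      - dirmngr
    state: present
- name: apt_repository can now fetch and verify signing keys
  ansible.builtin.apt_repository:
    repo: ppa:nginx/stable
```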
### Issue Type
Documentation Report
### Component Name
apt_repository
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.4]
config file = ...
configured module search path = ...
ansible python module location = ...
ansible collection location = ...
executable location = /usr/local/bin/ansible
python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
Mostly a docs issue so this is N/A. But apt_repository applies to Debian, Ubuntu, and forks.
### Additional Information
Tells people that they need gnupg when using apt_repository.
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76133
|
https://github.com/ansible/ansible/pull/78586
|
f1c56e988dbbb769b34a3c80baa40c916b4d0c88
|
3b9f1871e512ffc4106348cadb2ea369eb8b4fd0
| 2021-10-25T21:01:38Z |
python
| 2022-08-23T15:06:11Z |
lib/ansible/modules/apt_repository.py
|
# encoding: utf-8
# Copyright: (c) 2012, Matt Wright <[email protected]>
# Copyright: (c) 2013, Alexander Saltanov <[email protected]>
# Copyright: (c) 2014, Rutger Spiertz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apt_repository
short_description: Add and remove APT repositories
description:
- Add or remove an APT repository in Ubuntu and Debian.
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: debian
notes:
- This module supports Debian Squeeze (version 6) as well as its successors and derivatives.
options:
repo:
description:
- A source string for the repository.
type: str
required: true
state:
description:
- A source string state.
type: str
choices: [ absent, present ]
default: "present"
mode:
description:
- The octal mode for newly created files in sources.list.d.
- Default is what system uses (probably 0644).
type: raw
version_added: "1.6"
update_cache:
description:
- Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
type: bool
default: "yes"
aliases: [ update-cache ]
update_cache_retries:
description:
- Amount of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
type: int
default: 5
version_added: '2.10'
update_cache_retry_max_delay:
description:
- Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
type: int
default: 12
version_added: '2.10'
validate_certs:
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: '1.8'
filename:
description:
- Sets the name of the source list file in sources.list.d.
Defaults to a file name based on the repository source url.
The .list extension will be automatically added.
type: str
version_added: '2.1'
codename:
description:
- Override the distribution codename to use for PPA repositories.
Should usually only be set when working with a PPA on
a non-Ubuntu target (for example, Debian or Mint).
type: str
version_added: '2.3'
install_python_apt:
description:
- Whether to automatically try to install the Python apt library or not, if it is not already installed.
Without this library, the module does not work.
- Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
- Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
the system Python, set I(install_python_apt=false) and ensure that the Python apt library
for your Python version is installed some other way.
type: bool
default: true
author:
- Alexander Saltanov (@sashka)
version_added: "0.7"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
'''
EXAMPLES = '''
- name: Add specified repository into sources list
ansible.builtin.apt_repository:
repo: deb http://archive.canonical.com/ubuntu hardy partner
state: present
- name: Add specified repository into sources list using specified filename
ansible.builtin.apt_repository:
repo: deb http://dl.google.com/linux/chrome/deb/ stable main
state: present
filename: google-chrome
- name: Add source repository into sources list
ansible.builtin.apt_repository:
repo: deb-src http://archive.canonical.com/ubuntu hardy partner
state: present
- name: Remove specified repository from sources list
ansible.builtin.apt_repository:
repo: deb http://archive.canonical.com/ubuntu hardy partner
state: absent
- name: Add nginx stable repository from PPA and install its signing key on Ubuntu target
ansible.builtin.apt_repository:
repo: ppa:nginx/stable
- name: Add nginx stable repository from PPA and install its signing key on Debian target
ansible.builtin.apt_repository:
repo: 'ppa:nginx/stable'
codename: trusty
- name: One way to avoid apt_key once it is removed from your distro
block:
- name: somerepo |no apt key
ansible.builtin.get_url:
url: https://download.example.com/linux/ubuntu/gpg
dest: /etc/apt/trusted.gpg.d/somerepo.asc
- name: somerepo | apt source
ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/somerepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
'''
RETURN = '''#'''
import copy
import glob
import json
import os
import re
import sys
import tempfile
import random
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
from ansible.module_utils.urls import fetch_url
try:
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
except ImportError:
apt = apt_pkg = aptsources_distro = distro = None
HAVE_PYTHON_APT = False
DEFAULT_SOURCES_PERM = 0o0644
VALID_SOURCE_TYPES = ('deb', 'deb-src')
def install_python_apt(module, apt_pkg_name):
if not module.check_mode:
apt_get_path = module.get_bin_path('apt-get')
if apt_get_path:
rc, so, se = module.run_command([apt_get_path, 'update'])
if rc != 0:
module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
rc, so, se = module.run_command([apt_get_path, 'install', apt_pkg_name, '-y', '-q'])
if rc != 0:
module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
else:
module.fail_json(msg="%s must be installed to use check mode" % apt_pkg_name)
class InvalidSource(Exception):
pass
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
def __init__(self, module):
self.module = module
self.files = {} # group sources by file
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()
self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
# read sources.list if it exists
if os.path.isfile(self.default_file):
self.load(self.default_file)
# read sources.list.d
for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
self.load(file)
def __iter__(self):
'''Simple iterator to go over all sources. Empty, non-source, and other not valid lines will be skipped.'''
for file, sources in self.files.items():
for n, valid, enabled, source, comment in sources:
if valid:
yield file, n, enabled, source, comment
def _expand_path(self, filename):
if '/' in filename:
return filename
else:
return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
def _suggest_filename(self, line):
def _cleanup_filename(s):
filename = self.module.params['filename']
if filename is not None:
return filename
return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
def _strip_username_password(s):
if '@' in s:
s = s.split('@', 1)
s = s[-1]
return s
# Drop options and protocols.
line = re.sub(r'\[[^\]]+\]', '', line)
line = re.sub(r'\w+://', '', line)
# split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
# Drop usernames and passwords
parts[0] = _strip_username_password(parts[0])
return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
def _parse(self, line, raise_if_invalid_or_disabled=False):
valid = False
enabled = True
source = ''
comment = ''
line = line.strip()
if line.startswith('#'):
enabled = False
line = line[1:]
# Check for another "#" in the line and treat a part after it as a comment.
i = line.find('#')
if i > 0:
comment = line[i + 1:].strip()
line = line[:i]
# Split a source into substring to make sure that it is source spec.
# Duplicated whitespaces in a valid source spec will be removed.
source = line.strip()
if source:
chunks = source.split()
if chunks[0] in VALID_SOURCE_TYPES:
valid = True
source = ' '.join(chunks)
if raise_if_invalid_or_disabled and (not valid or not enabled):
raise InvalidSource(line)
return valid, enabled, source, comment
@staticmethod
def _apt_cfg_file(filespec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_file(filespec)
except AttributeError:
result = apt_pkg.Config.FindFile(filespec)
return result
@staticmethod
def _apt_cfg_dir(dirspec):
'''
Wrapper for `apt_pkg` module for running with Python 2.5
'''
try:
result = apt_pkg.config.find_dir(dirspec)
except AttributeError:
result = apt_pkg.Config.FindDir(dirspec)
return result
def load(self, file):
group = []
f = open(file, 'r')
for n, line in enumerate(f):
valid, enabled, source, comment = self._parse(line)
group.append((n, valid, enabled, source, comment))
self.files[file] = group
def save(self):
for filename, sources in list(self.files.items()):
if sources:
d, fn = os.path.split(filename)
try:
os.makedirs(d)
except OSError as ex:
if not os.path.isdir(d):
self.module.fail_json("Failed to create directory %s: %s" % (d, to_native(ex)))
try:
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
except (OSError, IOError) as e:
self.module.fail_json(msg='Unable to create temp file at "%s" for apt source: %s' % (d, to_native(e)))
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
line = ''.join(chunks)
try:
f.write(line)
except IOError as ex:
self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(ex)))
self.module.atomic_move(tmp_path, filename)
# allow the user to override the default mode
if filename in self.new_repos:
this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
self.module.set_mode_if_different(filename, this_mode, False)
else:
del self.files[filename]
if os.path.exists(filename):
os.remove(filename)
def dump(self):
dumpstruct = {}
for filename, sources in self.files.items():
if sources:
lines = []
for n, valid, enabled, source, comment in sources:
chunks = []
if not enabled:
chunks.append('# ')
chunks.append(source)
if comment:
chunks.append(' # ')
chunks.append(comment)
chunks.append('\n')
lines.append(''.join(chunks))
dumpstruct[filename] = ''.join(lines)
return dumpstruct
def _choice(self, new, old):
if new is None:
return old
return new
def modify(self, file, n, enabled=None, source=None, comment=None):
'''
This function to be used with iterator, so we don't care of invalid sources.
If source, enabled, or comment is None, original value from line ``n`` will be preserved.
'''
valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
def _add_valid_source(self, source_new, comment_new, file):
# We'll try to reuse disabled source if we have it.
# If we have more than one entry, we will enable them all - no advanced logic, remember.
self.module.log('adding source file: %s | %s | %s' % (source_new, comment_new, file))
found = False
for filename, n, enabled, source, comment in self:
if source == source_new:
self.modify(filename, n, enabled=True)
found = True
if not found:
if file is None:
file = self.default_file
else:
file = self._expand_path(file)
if file not in self.files:
self.files[file] = []
files = self.files[file]
files.append((len(files), True, True, source_new, comment_new))
self.new_repos.add(file)
def add_source(self, line, comment='', file=None):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
# Prefer separate files for new sources.
self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
def _remove_valid_source(self, source):
# If we have more than one entry, we will remove them all (not comment, remove!)
for filename, n, enabled, src, comment in self:
if source == src and enabled:
self.files[filename].pop(n)
def remove_source(self, line):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
class UbuntuSourcesList(SourcesList):
LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
def __init__(self, module):
self.module = module
self.codename = module.params['codename'] or distro.codename
super(UbuntuSourcesList, self).__init__(module)
self.apt_key_bin = self.module.get_bin_path('apt-key', required=False)
self.gpg_bin = self.module.get_bin_path('gpg', required=False)
if not self.apt_key_bin and not self.gpg_bin:
self.module.fail_json(msg='Either apt-key or gpg binary is required, but neither could be found')
def __deepcopy__(self, memo=None):
return UbuntuSourcesList(self.module)
def _get_ppa_info(self, owner_name, ppa_name):
lp_api = self.LP_API % (owner_name, ppa_name)
headers = dict(Accept='application/json')
response, info = fetch_url(self.module, lp_api, headers=headers)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
return json.loads(to_native(response.read()))
def _expand_ppa(self, path):
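# Expand the 'ppa:owner/name' shorthand into a full deb source line, for example
# 'ppa:nginx/stable' -> 'deb http://ppa.launchpad.net/nginx/stable/ubuntu <codename> main'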
ppa = path.split(':')[1]
ppa_owner = ppa.split('/')[0]
try:
ppa_name = ppa.split('/')[1]
except IndexError:
ppa_name = 'ppa'
line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
return line, ppa_owner, ppa_name
def _key_already_exists(self, key_fingerprint):
if self.apt_key_bin:
rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True)
found = len(err) == 0
else:
found = self._gpg_key_exists(key_fingerprint)
return found
def _gpg_key_exists(self, key_fingerprint):
found = False
keyfiles = ['/etc/apt/trusted.gpg'] # main gpg repo for apt
for other_dir in ('/etc/apt/trusted.gpg.d', '/usr/share/keyrings'):
# add other known sources of gpg sigs for apt, skip hidden files
keyfiles.extend([os.path.join(other_dir, x) for x in os.listdir(other_dir) if not x.startswith('.')])
for key_file in keyfiles:
if os.path.exists(key_file):
try:
rc, out, err = self.module.run_command([self.gpg_bin, '--list-packets', key_file])
except (IOError, OSError) as e:
self.module.debug("Could not check key against file %s: %s" % (key_file, to_native(e)))
continue
if key_fingerprint in out:
found = True
break
return found
# https://www.linuxuprising.com/2021/01/apt-key-is-deprecated-how-to-add.html
def add_source(self, line, comment='', file=None):
if line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(line)
if source in self.repos_urls:
# repository already exists
return
info = self._get_ppa_info(ppa_owner, ppa_name)
# add gpg sig if needed
if not self._key_already_exists(info['signing_key_fingerprint']):
# TODO: report file that would have been added if not check_mode
keyfile = ''
if not self.module.check_mode:
if self.apt_key_bin:
command = [self.apt_key_bin, 'adv', '--recv-keys', '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80',
info['signing_key_fingerprint']]
else:
keyfile = '/usr/share/keyrings/%s-%s-%s.gpg' % (os.path.basename(source).replace(' ', '-'), ppa_owner, ppa_name)
command = [self.gpg_bin, '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--export', info['signing_key_fingerprint']]
rc, stdout, stderr = self.module.run_command(command, check_rc=True, encoding=None)
if keyfile:
# using gpg we must write keyfile ourselves
if len(stdout) == 0:
self.module.fail_json(msg='Unable to get required signing key', rc=rc, stderr=stderr, command=command)
try:
with open(keyfile, 'wb') as f:
f.write(stdout)
self.module.log('Added repo key "%s" for apt to file "%s"' % (info['signing_key_fingerprint'], keyfile))
except (OSError, IOError) as e:
self.module.fail_json(msg='Unable to add required signing key for %s' % source, rc=rc, stderr=stderr, error=to_native(e))
# apt source file
file = file or self._suggest_filename('%s_%s' % (line, self.codename))
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
file = file or self._suggest_filename(source)
self._add_valid_source(source, comment, file)
def remove_source(self, line):
if line.startswith('ppa:'):
source = self._expand_ppa(line)[0]
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source)
@property
def repos_urls(self):
_repositories = []
for parsed_repos in self.files.values():
for parsed_repo in parsed_repos:
valid = parsed_repo[1]
enabled = parsed_repo[2]
source_line = parsed_repo[3]
if not valid or not enabled:
continue
if source_line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(source_line)
_repositories.append(source)
else:
_repositories.append(source_line)
return _repositories
def revert_sources_list(sources_before, sources_after, sourceslist_before):
'''Revert the sourcelist files to their previous state.'''
# First remove any new files that were created:
for filename in set(sources_after.keys()).difference(sources_before.keys()):
if os.path.exists(filename):
os.remove(filename)
# Now revert the existing files to their former state:
sourceslist_before.save()
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
mode=dict(type='raw'),
update_cache=dict(type='bool', default=True, aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
filename=dict(type='str'),
# This should not be needed, but exists as a failsafe
install_python_apt=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
codename=dict(type='str'),
),
supports_check_mode=True,
)
params = module.params
repo = module.params['repo']
state = module.params['state']
update_cache = module.params['update_cache']
# Note: mode is referenced in SourcesList class via the passed in module (self here)
sourceslist = None
if not HAVE_PYTHON_APT:
# This interpreter can't see the apt Python library- we'll do the following to try and fix that:
# 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
# 2) finding none, try to install a matching python-apt package for the current interpreter version;
# we limit to the current interpreter version to try and avoid installing a whole other Python just
# for apt support
# 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
# the current interpreter again, but we'll let it respawn anyway for simplicity)
# 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
# made any more complex than it already is to try and cover more, eg, custom interpreters taking over
# system locations)
apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
if has_respawned():
# this shouldn't be possible; short-circuit early if it happens...
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
# don't make changes if we're in check_mode
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % apt_pkg_name)
if params['install_python_apt']:
install_python_apt(module, apt_pkg_name)
else:
module.fail_json(msg='%s is not installed, and install_python_apt is False' % apt_pkg_name)
# try again to find the bindings in common places
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
# NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
else:
# we've done all we can do; just tell the user it's busted and get out
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
if not repo:
module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
if isinstance(distro, aptsources_distro.Distribution):
sourceslist = UbuntuSourcesList(module)
else:
module.fail_json(msg='Module apt_repository is not supported on target.')
sourceslist_before = copy.deepcopy(sourceslist)
sources_before = sourceslist.dump()
try:
if state == 'present':
sourceslist.add_source(repo)
elif state == 'absent':
sourceslist.remove_source(repo)
except InvalidSource as ex:
module.fail_json(msg='Invalid repository string: %s' % to_native(ex))
sources_after = sourceslist.dump()
changed = sources_before != sources_after
if changed and module._diff:
diff = []
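# Indexing the (filename, '/dev/null') tuple with a bool selects '/dev/null' as the
# diff header when the file exists on only one side, matching unified diff conventions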
for filename in set(sources_before.keys()).union(sources_after.keys()):
diff.append({'before': sources_before.get(filename, ''),
'after': sources_after.get(filename, ''),
'before_header': (filename, '/dev/null')[filename not in sources_before],
'after_header': (filename, '/dev/null')[filename not in sources_after]})
else:
diff = {}
if changed and not module.check_mode:
try:
sourceslist.save()
if update_cache:
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache = apt.Cache()
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff with a max fail count, plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
revert_sources_list(sources_before, sources_after, sourceslist_before)
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
except (OSError, IOError) as ex:
revert_sources_list(sources_before, sources_after, sourceslist_before)
module.fail_json(msg=to_native(ex))
module.exit_json(changed=changed, repo=repo, state=state, diff=diff)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,639 |
tests: `test_get_text_width_no_locale()` fails on Ubuntu 22.04
|
### Summary
The unit test ``test_get_text_width_no_locale()`` in test/units/utils/test_display.py fails on Ubuntu 22.04
```
def test_get_text_width_no_locale():
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
> pytest.raises(EnvironmentError, get_text_width, '\U000110cd')
E Failed: DID NOT RAISE <class 'OSError'>
test/units/utils/test_display.py:40: Failed
```
The cause appears to be a new behaviour of the C function `wcwidth()`, which no longer returns -1 for the character used ([U+110CD KAITHI NUMBER SIGN ABOVE](https://www.compart.com/en/unicode/U+110CD), Unicode 11.0, June 2018).
```
$ docker run -it --rm public.ecr.aws/n5z0e8q9/ubuntu2004-test python3 -c "import ctypes,ctypes.util; _LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c')); _LIBC.wcwidth.argtypes = (ctypes.c_wchar,); _LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int); _MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1; print(_LIBC.wcswidth('\\U000110cd', _MAX_INT))"
-1
```
```
$ lsb_release -d
Description: Ubuntu 22.04.1 LTS
$ python3 -c "import ctypes,ctypes.util; _LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c')); _LIBC.wcwidth.argtypes = (ctypes.c_wchar,); _LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int); _MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1; print(_LIBC.wcwidth('\\U000110cd'))"
1
```
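For readability, here is the same probe as a small script (a sketch equivalent to the one-liners above; it adds no new behaviour):
```python
# Ask the C library's wcwidth() for the display width of U+110CD.
# glibc on Ubuntu 20.04 returns -1 (unknown character); Ubuntu 22.04 returns 1,
# which is why get_text_width() no longer raises EnvironmentError.
import ctypes
import ctypes.util
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
libc.wcwidth.argtypes = (ctypes.c_wchar,)
libc.wcwidth.restype = ctypes.c_int
print(libc.wcwidth('\U000110cd'))
```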
### Issue Type
Bug Report
### Component Name
ansible-test
### Ansible Version
```console
$ ansible --version
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel"
if you are modifying the Ansible engine, or trying out features under development. This is a rapidly
changing source of code and can become unstable at any point.
ansible [core 2.14.0.dev0] (test_ansible_version-devel 8c6e00df28) last updated 2022/08/25 11:58:21 (GMT +100)
config file = None
configured module search path = ['/home/willmerae/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/willmerae/src/ansible/lib/ansible
ansible collection location = /home/willmerae/.ansible/collections:/usr/share/ansible/collections
executable location = /home/willmerae/src/ansible/bin/ansible
python version = 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0] (/usr/bin/python)
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
[WARNING]: You are running the development version of Ansible. You should only
run Ansible from "devel" if you are modifying the Ansible engine, or trying out
features under development. This is a rapidly changing source of code and can
become unstable at any point.
CONFIG_FILE() = None
```
### OS / Environment
Ubuntu 22.04 LTS x64
### Steps to Reproduce
```
$ source hacking/env-setup
$ ansible-test units --venv --python 3.9
```
### Expected Results
All unit tests pass
### Actual Results
```console
Unit test modules with Python 3.9
============================= test session starts ==============================
platform linux -- Python 3.9.13, pytest-7.0.1, pluggy-1.0.0
rootdir: /home/willmerae/src/ansible, configfile: test/lib/ansible_test/_data/pytest/config/default.ini
plugins: xdist-2.5.0, forked-1.4.0, mock-3.7.0
gw0 I / gw1 I / gw2 I / gw3 I / gw4 I / gw5 I / gw6 I / gw7 I / gw8 I / gw9 I / gw10 I / gw11 I
gw0 [123] / gw1 [123] / gw2 [123] / gw3 [123] / gw4 [123] / gw5 [123] / gw6 [123] / gw7 [123] / gw8 [123] / gw9 [123] / gw10 [123] / gw11 [123]
........................................................................ [ 58%]
................................................... [100%]
- generated xml file: /home/willmerae/src/ansible/test/results/junit/python3.9-modules-units.xml -
============================= slowest 25 durations =============================
0.07s call test/units/modules/test_hostname.py::TestHostname::test_stategy_get_never_writes_in_check_mode
0.03s call test/units/modules/test_async_wrapper.py::TestAsyncWrapper::test_run_module
0.03s call test/units/modules/test_iptables.py::TestIptables::test_log_level
0.02s setup test/units/modules/test_service.py::test_sunos_service_start
0.01s call test/units/modules/test_known_hosts.py::KnownHostsDiffTestCase::test_sanity_check
0.01s call test/units/modules/test_copy.py::test_split_pre_existing_dir_working_dir_exists[dir1-expected0]
0.01s setup test/units/modules/test_copy.py::test_split_pre_existing_dir_one_level_exists[dir1/dir2/-expected3]
0.01s call test/units/modules/test_iptables.py::TestIptables::test_iprange
0.01s call test/units/modules/test_iptables.py::TestIptables::test_tcp_flags
0.01s setup test/units/modules/test_copy.py::test_split_pre_existing_dir_one_level_exists[/dir1/-expected5]
0.01s call test/units/modules/test_iptables.py::TestIptables::test_chain_creation
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[32768-a+X-0]
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16384-u+rwx-448]
0.01s call test/units/modules/test_copy.py::test_split_pre_existing_dir_working_dir_exists[dir1/dir2-expected2]
0.01s setup test/units/modules/test_pip.py::test_failure_when_pip_absent[patch_ansible_module0]
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16895-g-rwx-455]
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16384-g=rwx-56]
0.01s call test/units/modules/test_iptables.py::TestIptables::test_chain_deletion_check_mode
0.01s call test/units/modules/test_iptables.py::TestIptables::test_without_required_parameters
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16895-a-X-438]
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16384-g+rwx-56]
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16384-u=rwx,g=rwx,o=rwx-511]
0.01s call test/units/modules/test_iptables.py::TestIptables::test_insert_rule_with_wait
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16895-a-rwx-0]
0.01s setup test/units/modules/test_copy.py::test_good_symbolic_modes[16384-u+rwx,g+rwx,o+rwx-511]
============================= 123 passed in 1.40s ==============================
Unit test module_utils with Python 3.9
============================= test session starts ==============================
platform linux -- Python 3.9.13, pytest-7.0.1, pluggy-1.0.0
rootdir: /home/willmerae/src/ansible, configfile: test/lib/ansible_test/_data/pytest/config/default.ini
plugins: xdist-2.5.0, forked-1.4.0, mock-3.7.0
gw0 I / gw1 I / gw2 I / gw3 I / gw4 I / gw5 I / gw6 I / gw7 I / gw8 I / gw9 I / gw10 I / gw11 I
gw0 [1673] / gw1 [1673] / gw2 [1673] / gw3 [1673] / gw4 [1673] / gw5 [1673] / gw6 [1673] / gw7 [1673] / gw8 [1673] / gw9 [1673] / gw10 [1673] / gw11 [1673]
........................................................................ [ 4%]
........................................................................ [ 8%]
.......................s....s....s..s..s....s...ss...s.s.s.......s.....s [ 12%]
....s................................................................... [ 17%]
........................................................................ [ 21%]
........................................................................ [ 25%]
........................................................................ [ 30%]
........................................................................ [ 34%]
......................................................................... [ 38%]
........................................................................ [ 43%]
........................................................................ [ 47%]
........................................................................ [ 51%]
........................................................................ [ 56%]
........................................................................ [ 60%]
........................................................................ [ 64%]
........................................................................ [ 68%]
........................................................................ [ 73%]
........................................................................ [ 77%]
........................................................................ [ 81%]
.s......s.........................................s....s.s.s............ [ 86%]
..................s..................................................... [ 90%]
........................................................................ [ 94%]
........................................................................ [ 99%]
................ [100%]
- generated xml file: /home/willmerae/src/ansible/test/results/junit/python3.9-module_utils-units.xml -
============================= slowest 25 durations =============================
3.01s call test/units/module_utils/facts/test_timeout.py::test_implicit_file_overridden_succeeds
2.01s call test/units/module_utils/facts/test_timeout.py::test_explicit_timeout
2.01s call test/units/module_utils/facts/test_timeout.py::test_explicit_succeeds
2.01s call test/units/module_utils/facts/test_timeout.py::test_implicit_file_overridden_timesout
1.02s call test/units/module_utils/facts/test_timeout.py::test_implicit_file_default_timesout
1.01s call test/units/module_utils/facts/test_timeout.py::test_implicit_file_default_succeeds
1.01s call test/units/module_utils/facts/test_timeout.py::test_timeout_raises_timeout
1.01s call test/units/module_utils/facts/test_timeout.py::test_exception_not_caught_by_called_code
1.00s call test/units/module_utils/facts/test_timeout.py::test_timeout_raises_timeout_integration_test[stdin0]
1.00s call test/units/module_utils/test_api.py::TestRetryWithDelaysAndCondition::test_retry_exception
1.00s call test/units/module_utils/test_api.py::TestRateLimit::test_ratelimit
0.22s call test/units/module_utils/facts/test_facts.py::TestFactsLinuxHardwareGetMountFacts::test_get_mount_facts
0.22s call test/units/module_utils/facts/hardware/test_linux.py::TestFactsLinuxHardwareGetMountFacts::test_get_mount_facts
0.14s call test/units/module_utils/facts/test_ansible_collector.py::TestPkgMgrOSTreeFacts::test_is_rhel_edge_ostree
0.14s call test/units/module_utils/facts/test_ansible_collector.py::TestExceptionCollectedFacts::test_expected_facts
0.14s call test/units/module_utils/facts/test_ansible_collector.py::TestInPlace::test1
0.13s call test/units/module_utils/facts/test_ansible_collector.py::TestPkgMgrFacts::test_expected_facts
0.13s call test/units/module_utils/facts/test_ansible_collector.py::TestPkgMgrFacts::test_not_expected_facts
0.13s call test/units/module_utils/facts/test_ansible_collector.py::TestPkgMgrOSTreeFacts::test_is_fedora_ostree
0.13s call test/units/module_utils/facts/test_ansible_collector.py::TestPkgMgrFacts::test_basics
0.13s call test/units/module_utils/facts/test_ansible_collector.py::TestPkgMgrOSTreeFacts::test_not_expected_facts
0.12s call test/units/module_utils/facts/test_ansible_collector.py::TestCollectedFacts::test_basics
0.12s call test/units/module_utils/facts/test_ansible_collector.py::TestExceptionCollectedFacts::test_basics
0.12s call test/units/module_utils/facts/test_ansible_collector.py::TestCollectedFacts::test_expected_facts
0.12s call test/units/module_utils/facts/test_ansible_collector.py::TestExceptionCollectedFacts::test_not_expected_facts
=========================== short test summary info ============================
SKIPPED [1] test/units/module_utils/basic/test_imports.py:84: literal_eval is available in every version of Python3
SKIPPED [5] test/units/module_utils/basic/test_log.py:38: python systemd bindings not installed
SKIPPED [5] test/units/module_utils/basic/test_log.py:130: python systemd bindings not installed
SKIPPED [1] test/units/module_utils/basic/test_log.py:139: python systemd bindings not installed
SKIPPED [2] test/units/module_utils/basic/test_log.py:113: python systemd bindings not installed
SKIPPED [1] test/units/module_utils/facts/test_collectors.py:370: faulty test
SKIPPED [1] test/units/module_utils/facts/test_collectors.py:396: faulty test
SKIPPED [1] test/units/module_utils/facts/test_facts.py:46: This platform (DragonFly) does not have a fact_class.
SKIPPED [3] test/units/module_utils/facts/test_facts.py:64: This test class needs to be updated to specify collector_class
SKIPPED [1] test/units/module_utils/facts/test_facts.py:54: This platform (DragonFly) does not have a fact_class.
====================== 1652 passed, 21 skipped in 12.17s =======================
Unit test controller with Python 3.9
============================= test session starts ==============================
platform linux -- Python 3.9.13, pytest-7.0.1, pluggy-1.0.0
rootdir: /home/willmerae/src/ansible, configfile: test/lib/ansible_test/_data/pytest/config/default.ini
plugins: xdist-2.5.0, forked-1.4.0, mock-3.7.0
gw0 I / gw1 I / gw2 I / gw3 I / gw4 I / gw5 I / gw6 I / gw7 I / gw8 I / gw9 I / gw10 I / gw11 I
gw0 [1871] / gw1 [1871] / gw2 [1871] / gw3 [1871] / gw4 [1871] / gw5 [1871] / gw6 [1871] / gw7 [1871] / gw8 [1871] / gw9 [1871] / gw10 [1871] / gw11 [1871]
........................................................................ [ 3%]
........................................................................ [ 7%]
........................................................................ [ 11%]
........................................................................ [ 15%]
........................................................................ [ 19%]
........................................................................ [ 23%]
........................................................................ [ 26%]
........................................................................ [ 30%]
........................................................................ [ 34%]
........................................................................ [ 38%]
.........................................s.............................. [ 42%]
........................................................................ [ 46%]
........................................................................ [ 50%]
........................................................................ [ 53%]
........................................................................ [ 57%]
........................................................................ [ 61%]
........................................................................ [ 65%]
........................................................................ [ 69%]
........................................................................ [ 73%]
........................................................................ [ 76%]
...................s....s.....s....s......s.....s....................... [ 80%]
........................................................................ [ 84%]
..........F............................................................. [ 88%]
........................................................................ [ 92%]
........................................................................ [ 96%]
.................................s..................................... [100%]
=================================== FAILURES ===================================
________________________ test_get_text_width_no_locale _________________________
[gw7] linux -- Python 3.9.13 /home/willmerae/src/ansible/test/results/.tmp/delegation/python3.9/bin/python
def test_get_text_width_no_locale():
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
> pytest.raises(EnvironmentError, get_text_width, '\U000110cd')
E Failed: DID NOT RAISE <class 'OSError'>
test/units/utils/test_display.py:40: Failed
- generated xml file: /home/willmerae/src/ansible/test/results/junit/python3.9-controller-units.xml -
============================= slowest 25 durations =============================
2.16s call test/units/utils/test_encrypt.py::test_encrypt_with_ident
2.11s call test/units/executor/test_task_executor.py::TestTaskExecutor::test_task_executor_run_loop
1.15s call test/units/plugins/lookup/test_password.py::TestLookupModuleWithPasslibWrappedAlgo::test_encrypt_wrapped_crypt_algo
1.13s call test/units/utils/test_encrypt.py::test_encrypt_default_rounds
1.01s call test/units/galaxy/test_api.py::test_wait_import_task_timeout[https://galaxy.server.com/api-v2-Token-token_ins0-1234-https://galaxy.server.com/api/v2/collection-imports/1234/]
1.00s call test/units/galaxy/test_api.py::test_wait_import_task_timeout[https://galaxy.server.com/api/automation-hub-v3-Bearer-token_ins1-1234-https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/]
0.77s call test/units/cli/test_galaxy.py::TestGalaxy::test_execute_remove
0.73s call test/units/cli/test_galaxy.py::TestGalaxy::test_run
0.61s call test/units/cli/test_adhoc.py::test_simple_command
0.60s call test/units/utils/test_encrypt.py::test_do_encrypt_passlib
0.52s call test/units/parsing/yaml/test_loader.py::TestAnsibleLoaderVault::test_embedded_vault
0.45s call test/units/utils/test_encrypt.py::test_passlib_bcrypt_salt
0.44s call test/units/utils/display/test_logger.py::test_logger
0.38s call test/units/cli/test_galaxy.py::test_install_implicit_role_with_collections[\ncollections:\n- namespace.name\nroles:\n- namespace.name\n]
0.37s call test/units/utils/collection_loader/test_collection_loader.py::test_import_from_collection
0.36s call test/units/utils/collection_loader/test_collection_loader.py::test_collpkg_loader_load_module
0.33s call test/units/utils/collection_loader/test_collection_loader.py::test_collection_get_data
0.32s call test/units/utils/collection_loader/test_collection_loader.py::test_bogus_imports
0.32s call test/units/cli/test_galaxy.py::test_parse_requirements_with_collection_source[\ncollections:\n- name: namespace.collection\n- name: namespace2.collection2\n source: https://galaxy-dev.ansible.com/\n- name: namespace3.collection3\n source: server\n]
0.32s call test/units/cli/test_galaxy.py::test_parse_requirements_with_roles_and_collections[\nroles:\n- username.role_name\n- src: username2.role_name2\n- src: ssh://github.com/user/repo\n scm: git\n\ncollections:\n- namespace.collection2\n]
0.30s call test/units/cli/test_galaxy.py::test_parse_requirements_roles_with_include_missing[\n- username.role\n- include: missing.yml\n]
0.29s call test/units/cli/test_galaxy.py::test_parse_requirements_roles_with_include[\n- username.included_role\n- src: https://github.com/user/repo\n]
0.25s call test/units/executor/test_play_iterator.py::TestPlayIterator::test_play_iterator
0.23s call test/units/executor/test_play_iterator.py::TestPlayIterator::test_play_iterator_nested_blocks
0.21s call test/units/ansible_test/ci/test_azp.py::test_auth
=========================== short test summary info ============================
SKIPPED [1] test/units/parsing/vault/test_vault.py:783: This test is not ready yet
SKIPPED [1] test/units/plugins/strategy/test_strategy.py:121: Temporarily disabled due to fragile tests that need rewritten
SKIPPED [1] test/units/plugins/strategy/test_strategy.py:43: Temporarily disabled due to fragile tests that need rewritten
SKIPPED [1] test/units/plugins/strategy/test_strategy.py:425: Temporarily disabled due to fragile tests that need rewritten
SKIPPED [1] test/units/plugins/strategy/test_strategy.py:218: Temporarily disabled due to fragile tests that need rewritten
SKIPPED [1] test/units/plugins/strategy/test_strategy.py:175: Temporarily disabled due to fragile tests that need rewritten
SKIPPED [1] test/units/plugins/strategy/test_strategy.py:69: Temporarily disabled due to fragile tests that need rewritten
SKIPPED [1] test/units/vars/test_module_response_deepcopy.py:40: No current support for this situation
FAILED test/units/utils/test_display.py::test_get_text_width_no_locale
================== 1 failed, 1862 passed, 8 skipped in 9.20s ===================
FATAL: Command "pytest --forked -r a -n auto --color yes -p no:cacheprovider -c /home/willmerae/src/ansible/test/lib/ansible_test/_data/pytest/config/default.ini --junit-xml /home/willmerae/src/ansible/test/results/junit/python3.9-controller-units.xml --strict-markers --rootdir /home/willmerae/src/ansible --durations=25 test/units/_vendor/test_vendor.py test/units/ansible_test/ci/test_azp.py test/units/cli/arguments/test_optparse_helpers.py test/units/cli/galaxy/test_collection_extract_tar.py test/units/cli/galaxy/test_display_collection.py test/units/cli/galaxy/test_display_header.py test/units/cli/galaxy/test_display_role.py test/units/cli/galaxy/test_execute_list.py test/units/cli/galaxy/test_execute_list_collection.py test/units/cli/galaxy/test_get_collection_widths.py test/units/cli/test_adhoc.py test/units/cli/test_cli.py test/units/cli/test_console.py test/units/cli/test_doc.py test/units/cli/test_galaxy.py test/units/cli/test_playbook.py test/units/cli/test_vault.py test/units/config/manager/test_find_ini_config_file.py test/units/config/test_manager.py test/units/errors/test_errors.py test/units/executor/module_common/test_modify_module.py test/units/executor/module_common/test_module_common.py test/units/executor/module_common/test_recursive_finder.py test/units/executor/test_interpreter_discovery.py test/units/executor/test_play_iterator.py test/units/executor/test_playbook_executor.py test/units/executor/test_task_executor.py test/units/executor/test_task_queue_manager_callbacks.py test/units/executor/test_task_result.py test/units/galaxy/test_api.py test/units/galaxy/test_collection.py test/units/galaxy/test_collection_install.py test/units/galaxy/test_role_install.py test/units/galaxy/test_role_requirements.py test/units/galaxy/test_token.py test/units/galaxy/test_user_agent.py test/units/inventory/test_group.py test/units/inventory/test_host.py test/units/parsing/test_ajson.py test/units/parsing/test_dataloader.py test/units/parsing/test_mod_args.py test/units/parsing/test_splitter.py test/units/parsing/test_unquote.py test/units/parsing/utils/test_addresses.py test/units/parsing/utils/test_jsonify.py test/units/parsing/utils/test_yaml.py test/units/parsing/vault/test_vault.py test/units/parsing/vault/test_vault_editor.py test/units/parsing/yaml/test_constructor.py test/units/parsing/yaml/test_dumper.py test/units/parsing/yaml/test_loader.py test/units/parsing/yaml/test_objects.py test/units/playbook/role/test_include_role.py test/units/playbook/role/test_role.py test/units/playbook/test_attribute.py test/units/playbook/test_base.py test/units/playbook/test_block.py test/units/playbook/test_collectionsearch.py test/units/playbook/test_conditional.py test/units/playbook/test_helpers.py test/units/playbook/test_included_file.py test/units/playbook/test_play.py test/units/playbook/test_play_context.py test/units/playbook/test_playbook.py test/units/playbook/test_taggable.py test/units/playbook/test_task.py test/units/plugins/action/test_action.py test/units/plugins/action/test_gather_facts.py test/units/plugins/action/test_pause.py test/units/plugins/action/test_raw.py test/units/plugins/become/test_su.py test/units/plugins/become/test_sudo.py test/units/plugins/cache/test_cache.py test/units/plugins/callback/test_callback.py test/units/plugins/connection/test_connection.py test/units/plugins/connection/test_local.py test/units/plugins/connection/test_paramiko.py test/units/plugins/connection/test_psrp.py test/units/plugins/connection/test_ssh.py 
test/units/plugins/connection/test_winrm.py test/units/plugins/filter/test_core.py test/units/plugins/filter/test_mathstuff.py test/units/plugins/inventory/test_constructed.py test/units/plugins/inventory/test_inventory.py test/units/plugins/inventory/test_script.py test/units/plugins/lookup/test_env.py test/units/plugins/lookup/test_ini.py test/units/plugins/lookup/test_password.py test/units/plugins/lookup/test_url.py test/units/plugins/shell/test_cmd.py test/units/plugins/shell/test_powershell.py test/units/plugins/strategy/test_linear.py test/units/plugins/strategy/test_strategy.py test/units/plugins/test_plugins.py test/units/regex/test_invalid_var_names.py test/units/template/test_native_concat.py test/units/template/test_templar.py test/units/template/test_template_utilities.py test/units/template/test_vars.py test/units/test_constants.py test/units/test_context.py test/units/test_no_tty.py test/units/utils/collection_loader/test_collection_loader.py test/units/utils/display/test_broken_cowsay.py test/units/utils/display/test_display.py test/units/utils/display/test_logger.py test/units/utils/display/test_warning.py test/units/utils/test_cleanup_tmp_file.py test/units/utils/test_context_objects.py test/units/utils/test_display.py test/units/utils/test_encrypt.py test/units/utils/test_helpers.py test/units/utils/test_isidentifier.py test/units/utils/test_plugin_docs.py test/units/utils/test_shlex.py test/units/utils/test_unsafe_proxy.py test/units/utils/test_vars.py test/units/utils/test_version.py test/units/vars/test_module_response_deepcopy.py test/units/vars/test_variable_manager.py" returned exit status 1.
FATAL: Command "/usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=/home/willmerae/src/ansible PYTHONPATH=/tmp/ansible-test-qhlhnur_ /home/willmerae/src/ansible/test/results/.tmp/delegation/python3.9/bin/python /home/willmerae/src/ansible/bin/ansible-test units --containers '{}' --truncate 105 --color yes --host-path test/results/.tmp/host-ah1wk7w7 --metadata test/results/.tmp/metadata-np2gu58c.json" returned exit status 1.
```
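The `DID NOT RAISE` above is environment dependent rather than a logic error in the runner. Below is a minimal, hedged sketch of the suspected mechanism, assuming `get_text_width` ultimately defers to libc's `wcswidth()` (an assumption about the implementation; the `C.UTF-8` locale name and the glibc Unicode tables both vary by distribution):
```python
# Hedged sketch (not taken from the ansible source): whether U+110CD yields
# -1 from wcswidth() -- and hence whether an error can be raised at all --
# depends on the host C library's width tables, not on ansible itself.
import ctypes
import ctypes.util
import locale

# find_library('c') can return None on non-glibc systems; this sketch
# assumes a typical glibc Linux host.
libc = ctypes.CDLL(ctypes.util.find_library('c'))
libc.wcswidth.argtypes = [ctypes.c_wchar_p, ctypes.c_size_t]
libc.wcswidth.restype = ctypes.c_int

# 'C.UTF-8' may not exist everywhere; locale.Error is possible here.
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
print(libc.wcswidth('\U000110cd', 1))  # -1 on older glibc; >= 0 once the
                                       # codepoint enters the width tables
```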
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78639
|
https://github.com/ansible/ansible/pull/78645
|
ebcf1654c49ef3ce53771579aa903f2e1293c231
|
d8cb9117acf2b7a3624eb3b00df2daa6e97c5ae4
| 2022-08-25T13:30:12Z |
python
| 2022-08-29T14:06:37Z |
test/units/utils/test_display.py
|
# -*- coding: utf-8 -*-
# (c) 2020 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import locale
from unittest.mock import MagicMock
import pytest
from ansible.utils.display import Display, get_text_width
from ansible.utils.multiprocessing import context as multiprocessing_context
def test_get_text_width():
locale.setlocale(locale.LC_ALL, '')
assert get_text_width(u'コンニチハ') == 10
assert get_text_width(u'abコcd') == 6
assert get_text_width(u'café') == 4
assert get_text_width(u'four') == 4
assert get_text_width(u'\u001B') == 0
assert get_text_width(u'ab\u0000') == 2
assert get_text_width(u'abコ\u0000') == 4
assert get_text_width(u'🚀🐮') == 4
assert get_text_width(u'\x08') == 0
assert get_text_width(u'\x08\x08') == 0
assert get_text_width(u'ab\x08cd') == 3
assert get_text_width(u'ab\x1bcd') == 3
assert get_text_width(u'ab\x7fcd') == 3
assert get_text_width(u'ab\x94cd') == 3
pytest.raises(TypeError, get_text_width, 1)
pytest.raises(TypeError, get_text_width, b'four')
def test_get_text_width_no_locale():
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
pytest.raises(EnvironmentError, get_text_width, '\U000110cd')
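# Editorial aside (not part of the original file): whether '\U000110cd'
# makes get_text_width() raise here depends on the host C library's Unicode
# width tables, which is presumably why this assertion fails on newer glibc
# in the report above.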
def test_Display_banner_get_text_width(monkeypatch):
locale.setlocale(locale.LC_ALL, '')
display = Display()
display_mock = MagicMock()
monkeypatch.setattr(display, 'display', display_mock)
display.banner(u'🚀🐮', color=False, cows=False)
args, kwargs = display_mock.call_args
msg = args[0]
stars = u' %s' % (75 * u'*')
assert msg.endswith(stars)
def test_Display_banner_get_text_width_fallback(monkeypatch):
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
display = Display()
display_mock = MagicMock()
monkeypatch.setattr(display, 'display', display_mock)
display.banner(u'\U000110cd', color=False, cows=False)
args, kwargs = display_mock.call_args
msg = args[0]
stars = u' %s' % (78 * u'*')
assert msg.endswith(stars)
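# Editorial aside (not part of the original file): the star counts in these
# two tests appear to follow a fixed line budget of 79 columns minus the
# measured text width. u'🚀🐮' measures 4 columns, giving 75 stars; in the
# fallback case the width of the single unmeasurable character is taken as
# len() == 1, giving 78 stars.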
def test_Display_set_queue_parent():
display = Display()
pytest.raises(RuntimeError, display.set_queue, 'foo')
def test_Display_set_queue_fork():
def test():
display = Display()
display.set_queue('foo')
assert display._final_q == 'foo'
p = multiprocessing_context.Process(target=test)
p.start()
p.join()
assert p.exitcode == 0
def test_Display_display_fork():
def test():
queue = MagicMock()
display = Display()
display.set_queue(queue)
display.display('foo')
queue.send_display.assert_called_once_with(
'foo', color=None, stderr=False, screen_only=False, log_only=False, newline=True
)
p = multiprocessing_context.Process(target=test)
p.start()
p.join()
assert p.exitcode == 0
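# Editorial aside (not part of the original file): set_queue() is only
# legal in a forked worker -- the parent-process call above raises
# RuntimeError -- so both fork tests run their assertions inside a
# multiprocessing child and surface failures through the exit code.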
def test_Display_display_lock(monkeypatch):
lock = MagicMock()
display = Display()
monkeypatch.setattr(display, '_lock', lock)
display.display('foo')
lock.__enter__.assert_called_once_with()
def test_Display_display_lock_fork(monkeypatch):
lock = MagicMock()
display = Display()
monkeypatch.setattr(display, '_lock', lock)
monkeypatch.setattr(display, '_final_q', MagicMock())
display.display('foo')
lock.__enter__.assert_not_called()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,612 |
failed task inside an include_tasks inside a rescue block does not remove host from ansible_play_hosts nor from ansible_play_batch
|
### Summary
I have created a role that has two task files: main and validation. The main task file uses block/rescue to validate the health of a system, with the rescue block trying to recover a failed system and then re-running the validation tasks. If the system still reports failure after an attempted recovery, I expect the failed host to be removed from `ansible_play_hosts` and `ansible_play_batch`; however, they persist. Interestingly, the failed host(s) are excluded from future tasks in the play and from subsequent plays in the playbook. I was only able to reproduce this if the rescue block had a failure from an `include_tasks`.
### Issue Type
Bug Report
### Component Name
rescue
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.3]
config file = None
python version = 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
ubuntu 22.04
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
inventory:
```
[all]
a
b
```
roles/fail/tasks/validation.yml:
```yaml
- fail:
when: "inventory_hostname == 'a'"
```
roles/fail/tasks/main.yml:
```yaml
- block:
- include_tasks: "validation.yml"
rescue:
- debug:
msg: "Failed for {{ inventory_hostname }}"
- include_tasks: "validation.yml"
```
playbook:
```yaml
- hosts: "all"
gather_facts: false
tasks:
- include_role:
name: "fail"
- debug:
msg: "{{ item }} is: {{ lookup('vars', item) }}"
loop:
- "ansible_play_hosts_all"
- "ansible_play_hosts"
- "ansible_play_batch"
- hosts: "all"
gather_facts: false
tasks:
- debug:
msg: "second play for {{ inventory_hostname }}"
```
### Expected Results
I expected host a to be removed from ansible_play_hosts and ansible_play_batch
### Actual Results
```console
results:
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [include_role : fail] ***********************************************************************************************************************************************************************************************************************************************************************************
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a, b
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
skipping: [b]
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [fail : debug] ******************************************************************************************************************************************************************************************************************************************************************************************
ok: [a] => {
"msg": "Failed for a"
}
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => (item=ansible_play_hosts_all) => {
"msg": "ansible_play_hosts_all is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_hosts) => {
"msg": "ansible_play_hosts is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_batch) => {
"msg": "ansible_play_batch is: ['a', 'b']"
}
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => {
"msg": "second play for b"
}
PLAY RECAP ***************************************************************************************************************************************************************************************************************************************************************************************************
a : ok=3 changed=0 unreachable=0 failed=1 skipped=0 rescued=1 ignored=0
b : ok=3 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78612
|
https://github.com/ansible/ansible/pull/78657
|
38c3b5e8f1cd6898b8dfe54f1849c8e424b5dd49
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
| 2022-08-22T14:16:46Z |
python
| 2022-08-30T15:16:21Z |
changelogs/fragments/78612-rescue-block-ansible_play_hosts.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,612 |
failed task inside an include_tasks inside a rescue block does not remove host from ansible_play_hosts nor from ansible_play_batch
|
### Summary
I have created a role that has two task files: main and validation. The main task file uses block/rescue to validate the health of a system, with the rescue block trying to recover a failed system and then re-running the validation tasks. If the system still reports failure after an attempted recovery, I expect the failed host to be removed from `ansible_play_hosts` and `ansible_play_batch`; however, they persist. Interestingly, the failed host(s) are excluded from future tasks in the play and from subsequent plays in the playbook. I was only able to reproduce this if the rescue block had a failure from an `include_tasks`.
### Issue Type
Bug Report
### Component Name
rescue
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.3]
config file = None
python version = 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
ubuntu 22.04
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
inventory:
```
[all]
a
b
```
roles/fail/tasks/validation.yml:
```yaml
- fail:
when: "inventory_hostname == 'a'"
```
roles/fail/tasks/main.yml:
```yaml
- block:
- include_tasks: "validation.yml"
rescue:
- debug:
msg: "Failed for {{ inventory_hostname }}"
- include_tasks: "validation.yml"
```
playbook:
```yaml
- hosts: "all"
gather_facts: false
tasks:
- include_role:
name: "fail"
- debug:
msg: "{{ item }} is: {{ lookup('vars', item) }}"
loop:
- "ansible_play_hosts_all"
- "ansible_play_hosts"
- "ansible_play_batch"
- hosts: "all"
gather_facts: false
tasks:
- debug:
msg: "second play for {{ inventory_hostname }}"
```
### Expected Results
I expected host a to be removed from ansible_play_hosts and ansible_play_batch
### Actual Results
```console
results:
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [include_role : fail] ***********************************************************************************************************************************************************************************************************************************************************************************
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a, b
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
skipping: [b]
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [fail : debug] ******************************************************************************************************************************************************************************************************************************************************************************************
ok: [a] => {
"msg": "Failed for a"
}
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => (item=ansible_play_hosts_all) => {
"msg": "ansible_play_hosts_all is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_hosts) => {
"msg": "ansible_play_hosts is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_batch) => {
"msg": "ansible_play_batch is: ['a', 'b']"
}
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => {
"msg": "second play for b"
}
PLAY RECAP ***************************************************************************************************************************************************************************************************************************************************************************************************
a : ok=3 changed=0 unreachable=0 failed=1 skipped=0 rescued=1 ignored=0
b : ok=3 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78612
|
https://github.com/ansible/ansible/pull/78657
|
38c3b5e8f1cd6898b8dfe54f1849c8e424b5dd49
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
| 2022-08-22T14:16:46Z |
python
| 2022-08-30T15:16:21Z |
lib/ansible/executor/play_iterator.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from enum import IntEnum, IntFlag
from ansible import constants as C
from ansible.errors import AnsibleAssertionError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates']
class IteratingStates(IntEnum):
SETUP = 0
TASKS = 1
RESCUE = 2
ALWAYS = 3
HANDLERS = 4
COMPLETE = 5
class FailedStates(IntFlag):
NONE = 0
SETUP = 1
TASKS = 2
RESCUE = 4
ALWAYS = 8
HANDLERS = 16
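# Aside (editorial, not part of the original module): FailedStates is an
# IntFlag, so failure sources accumulate bitwise and are tested with masks,
# e.g.:
#   fs = FailedStates.NONE
#   fs |= FailedStates.TASKS                          # a task failed
#   fs |= FailedStates.RESCUE                         # ...and so did its rescue
#   fs & FailedStates.RESCUE == FailedStates.RESCUE   # -> True
# This is the idiom used by _set_failed_state()/_check_failed_state() below.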
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.handlers = []
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_handlers_task = 0
self.run_state = IteratingStates.SETUP
self.fail_state = FailedStates.NONE
self.pre_flushing_run_state = None
self.update_handlers = True
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, "
"pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, "
"tasks child state? (%s), rescue child state? (%s), always child state? (%s), "
"did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_handlers_task,
self.run_state,
self.fail_state,
self.pre_flushing_run_state,
self.update_handlers,
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks',
'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task',
'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
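# Aside (editorial, not part of the original module): __eq__ compares the
# state-machine fields only; handlers, did_rescue and did_start_at_task are
# deliberately (or at least currently) left out of the comparison above,
# even though copy() carries them over.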
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.handlers = self.handlers[:]
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.cur_handlers_task = self.cur_handlers_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pre_flushing_run_state = self.pre_flushing_run_state
new_state.update_handlers = self.update_handlers
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
# TODO: hardcoded resolution here, but should use actual resolution code in the end,
# in case of 'legacy' mismatch
setup_task.resolved_action = 'ansible.builtin.gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
# Default options to gather
for option in ('gather_subset', 'gather_timeout', 'fact_path'):
value = getattr(self._play, option, None)
if value is not None:
setup_task.args[option] = value
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
# keep a flattened (no blocks) list of all tasks from the play
# used for the lockstep mechanism in the linear strategy
self.all_tasks = setup_block.get_tasks()
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self.all_tasks.extend(new_block.get_tasks())
# keep list of all handlers, it is copied into each HostState
# at the beginning of IteratingStates.HANDLERS
# the copy happens at each flush in order to restore the original
# list and remove any included handlers that might not be notified
# at the particular flush
self.handlers = [h for b in self._play.handlers for h in b.block]
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self.set_state_for_host(host.name, HostState(blocks=self._blocks))
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == IteratingStates.COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
self.set_state_for_host(host.name, s)
# finally, reset the host's state to IteratingStates.SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = IteratingStates.SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
self.end_play = False
self.cur_task = 0
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self.set_state_for_host(host.name, HostState(blocks=[]))
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
display.deprecated(
'PlayIterator.cache_block_tasks is now noop due to the changes '
'in the way tasks are cached and is deprecated.',
version=2.16
)
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == IteratingStates.COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host)
if not peek:
self.set_state_for_host(host.name, s)
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = IteratingStates.COMPLETE
return (state, None)
if state.run_state == IteratingStates.SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through IteratingStates.SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through IteratingStates.SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to IteratingStates.TASKS
state.pending_setup = False
state.run_state = IteratingStates.TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == IteratingStates.TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = IteratingStates.RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = IteratingStates.ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = IteratingStates.TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == IteratingStates.RESCUE:
# The process here is identical to IteratingStates.TASKS, except instead
# we move into the always portion of the block.
if host.name in self._play._removed_hosts:
self._play._removed_hosts.remove(host.name)
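# Aside (editorial, not part of the original module): entering RESCUE
# puts the host back into the play by undoing the append done in
# mark_host_failed() further below; if the rescue itself later fails,
# nothing re-adds the host to _removed_hosts unless mark_host_failed()
# is called again -- relevant to issue #78612 above.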
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
state.run_state = IteratingStates.ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = FailedStates.NONE
state.run_state = IteratingStates.ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == IteratingStates.ALWAYS:
# And again, the process here is identical to IteratingStates.TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to IteratingStates.COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != FailedStates.NONE:
state.run_state = IteratingStates.COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = IteratingStates.TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_always_task += 1
elif state.run_state == IteratingStates.HANDLERS:
if state.update_handlers:
# reset handlers for HostState since handlers from include_tasks
# might be there from previous flush
state.handlers = self.handlers[:]
state.update_handlers = False
state.cur_handlers_task = 0
if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
state.update_handlers = True
state.run_state = IteratingStates.COMPLETE
else:
while True:
try:
task = state.handlers[state.cur_handlers_task]
except IndexError:
task = None
state.run_state = state.pre_flushing_run_state
state.update_handlers = True
break
else:
state.cur_handlers_task += 1
if task.is_host_notified(host):
break
elif state.run_state == IteratingStates.COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == IteratingStates.SETUP:
state.fail_state |= FailedStates.SETUP
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= FailedStates.TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= FailedStates.RESCUE
if state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= FailedStates.ALWAYS
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.HANDLERS:
state.fail_state |= FailedStates.HANDLERS
state.update_handlers = True
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self.set_state_for_host(host.name, s)
self._play._removed_hosts.append(host.name)
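# Aside (editorial, not part of the original module): this append is what
# ultimately drops the host from ansible_play_hosts/ansible_play_batch
# (the variable manager is assumed to exclude _removed_hosts when building
# those magic variables). Failures that only propagate out of a child state
# via _set_failed_state() never reach this line.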
def get_failed_hosts(self):
return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
return True
elif state.fail_state != FailedStates.NONE:
if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
return False
elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0)
elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def clear_host_errors(self, host):
self._clear_state_errors(self.get_state_for_host(host.name))
def _clear_state_errors(self, state: HostState) -> None:
state.fail_state = FailedStates.NONE
if state.tasks_child_state is not None:
self._clear_state_errors(state.tasks_child_state)
elif state.rescue_child_state is not None:
self._clear_state_errors(state.rescue_child_state)
elif state.always_child_state is not None:
self._clear_state_errors(state.always_child_state)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
'''
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
if state.run_state == IteratingStates.RESCUE:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
return False
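# Aside (editorial, not part of the original module): this only recurses
# through tasks_child_state; a state whose failure actually lives in
# rescue_child_state is still reported as "rescuing" simply because the
# parent run_state is RESCUE. The strategy uses this check to decide
# between counting a failure as 'rescued' and calling mark_host_failed(),
# so for a failure raised from an include_tasks inside a rescue block the
# host is plausibly never appended to _removed_hosts -- the symptom
# described in #78612.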
def get_original_task(self, host, task):
display.deprecated(
'PlayIterator.get_original_task is now noop due to the changes '
'in the way tasks are cached and is deprecated.',
version=2.16
)
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if (state.fail_state != FailedStates.NONE and state.run_state == IteratingStates.TASKS) or not task_list:
return state
if state.run_state == IteratingStates.TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.HANDLERS:
state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block]
return state
def add_tasks(self, host, task_list):
self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list))
@property
def host_states(self):
return self._host_states
def get_state_for_host(self, hostname: str) -> HostState:
return self._host_states[hostname]
def set_state_for_host(self, hostname: str, state: HostState) -> None:
if not isinstance(state, HostState):
raise AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state))
self._host_states[hostname] = state
def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None:
if not isinstance(run_state, IteratingStates):
raise AnsibleAssertionError('Expected run_state to be a IteratingStates but was %s' % (type(run_state)))
self._host_states[hostname].run_state = run_state
def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None:
if not isinstance(fail_state, FailedStates):
raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state)))
self._host_states[hostname].fail_state = fail_state
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,612 |
failed task inside an include_tasks inside a rescue block does not remove host from ansible_play_hosts nor from ansible_play_batch
|
### Summary
I have created a role that has two task files: main and validation. The main task file uses block/rescue to validate the health of a system, with the rescue block trying to recover a failed system and then re-running the validation tasks. If the system still reports failure after an attempted recovery, I expect the failed host to be removed from `ansible_play_hosts` and `ansible_play_batch`; however, they persist. Interestingly, the failed host(s) are excluded from future tasks in the play and from subsequent plays in the playbook. I was only able to reproduce this if the rescue block had a failure from an `include_tasks`.
### Issue Type
Bug Report
### Component Name
rescue
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.3]
config file = None
python version = 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
ubuntu 22.04
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
inventory:
```
[all]
a
b
```
roles/fail/tasks/validation.yml:
```yaml
- fail:
when: "inventory_hostname == 'a'"
```
roles/fail/tasks/main.yml:
```yaml
- block:
- include_tasks: "validation.yml"
rescue:
- debug:
msg: "Failed for {{ inventory_hostname }}"
- include_tasks: "validation.yml"
```
playbook:
```yaml
- hosts: "all"
gather_facts: false
tasks:
- include_role:
name: "fail"
- debug:
msg: "{{ item }} is: {{ lookup('vars', item) }}"
loop:
- "ansible_play_hosts_all"
- "ansible_play_hosts"
- "ansible_play_batch"
- hosts: "all"
gather_facts: false
tasks:
- debug:
msg: "second play for {{ inventory_hostname }}"
```
### Expected Results
I expected host a to be removed from ansible_play_hosts and ansible_play_batch
### Actual Results
```console
results:
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [include_role : fail] ***********************************************************************************************************************************************************************************************************************************************************************************
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a, b
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
skipping: [b]
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [fail : debug] ******************************************************************************************************************************************************************************************************************************************************************************************
ok: [a] => {
"msg": "Failed for a"
}
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => (item=ansible_play_hosts_all) => {
"msg": "ansible_play_hosts_all is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_hosts) => {
"msg": "ansible_play_hosts is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_batch) => {
"msg": "ansible_play_batch is: ['a', 'b']"
}
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => {
"msg": "second play for b"
}
PLAY RECAP ***************************************************************************************************************************************************************************************************************************************************************************************************
a : ok=3 changed=0 unreachable=0 failed=1 skipped=0 rescued=1 ignored=0
b : ok=3 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78612
|
https://github.com/ansible/ansible/pull/78657
|
38c3b5e8f1cd6898b8dfe54f1849c8e424b5dd49
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
| 2022-08-22T14:16:46Z |
python
| 2022-08-30T15:16:21Z |
lib/ansible/plugins/strategy/__init__.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import queue
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.play_iterator import IteratingStates
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.executor.task_queue_manager import CallbackSend, DisplaySend
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.fqcn import add_internal_fqcns
from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# Entries in this list match either exactly or as a string prefix.
# Regular expressions are not accepted.
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
class StrategySentinel:
pass
_sentinel = StrategySentinel()
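# Re-evaluate a task's changed_when/failed_when conditionals against the final
# task vars; mutates ``result`` in place with the outcome.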
def post_process_whens(result, task, templar, task_vars):
cond = None
if task.changed_when:
with templar.set_temporary_context(available_variables=task_vars):
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
with templar.set_temporary_context(available_variables=task_vars):
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
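# Collect the per-item loop variables (loop_var, index_var, the item label and
# the extended ``ansible_loop`` data) from a single loop result item.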
def _get_item_vars(result, task):
item_vars = {}
if task.loop or task.loop_with:
loop_var = result.get('ansible_loop_var', 'item')
index_var = result.get('ansible_index_var')
if loop_var in result:
item_vars[loop_var] = result[loop_var]
if index_var and index_var in result:
item_vars[index_var] = result[index_var]
if '_ansible_item_label' in result:
item_vars['_ansible_item_label'] = result['_ansible_item_label']
if 'ansible_loop' in result:
item_vars['ansible_loop'] = result['ansible_loop']
return item_vars
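# Thread target for the background results reader: drains the TQM's final
# queue, forwarding display and callback sends, normalizing TaskResults and
# appending them to ``strategy._results`` until the strategy sentinel arrives.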
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, DisplaySend):
display.display(*result.args, **result.kwargs)
elif isinstance(result, CallbackSend):
for arg in result.args:
if isinstance(arg, TaskResult):
strategy.normalize_task_result(arg)
break
strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
strategy.normalize_task_result(result)
with strategy._results_lock:
strategy._results.append(result)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
except queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator.host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
for host_name, state in prev_host_states.items():
if host_name == host.name:
continue
iterator.set_state_for_host(host_name, state)
iterator._play._removed_hosts.remove(host_name)
iterator.set_state_for_host(host.name, prev_host_state)
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if not play.finalized and Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in self._active_connections.values():
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be IteratingStates.COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(self._tqm._unreachable_hosts.keys()) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(iterator.get_failed_hosts()) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by two
# functions: linear.py::run(), and
# free.py::run() so we'd have to add to both to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
def normalize_task_result(self, task_result):
"""Normalize a TaskResult to reference actual Host and Task objects
when only given the ``Host.name``, or the ``Task._uuid``
Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
Mutates the original object
"""
if isinstance(task_result._host, string_types):
# If the value is a string, it is ``Host.name``
task_result._host = self._inventory.get_host(to_text(task_result._host))
if isinstance(task_result._task, string_types):
# If the value is a string, it is ``Task._uuid``
queue_cache_entry = (task_result._host.name, task_result._task)
try:
found_task = self._queued_task_cache[queue_cache_entry]['task']
except KeyError:
# This should only happen due to an implicit task created by the
# TaskExecutor, restrict this behavior to the explicit use case
# of an implicit async_status task
if task_result._task_fields.get('action') != 'async_status':
raise
original_task = Task()
else:
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._task = original_task
return task_result
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
handler_templar = Templar(self._loader)
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
for handler_task in handler_block.block:
if handler_task.name:
try:
if not handler_task.cached_name:
if handler_templar.is_template(handler_task.name):
handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
task=handler_task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
handler_task.name = handler_templar.template(handler_task.name)
handler_task.cached_name = True
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
candidates = (
handler_task.name,
handler_task.get_name(include_role_fqcn=False),
handler_task.get_name(include_role_fqcn=True),
)
if handler_name in candidates:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable) as e:
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
if not handler_task.listen:
display.warning(
"Handler '%s' is unusable because it has no listen topics and "
"the name could not be templated (host-specific variables are "
"not supported in handler names). The error: %s" % (handler_task.name, to_text(e))
)
continue
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
original_host = task_result._host
original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
iterator.mark_host_failed(h)
else:
iterator.mark_host_failed(original_host)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# Use of get_active_state() here helps detect proper state if, say, we are in a rescue
# block from an included file (include_tasks). In a non-included rescue case, a rescue
# that starts with a new 'block' will have an active state of IteratingStates.TASKS, so we also
# check the current state block tree to see if any blocks are rescuing.
if state and (iterator.get_active_state(state).run_state == IteratingStates.RESCUE or
iterator.is_any_block_rescuing(state)):
self._tqm._stats.increment('rescued', original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
for listening_handler_block in iterator._play.handlers:
for listening_handler in listening_handler_block.block:
listeners = getattr(listening_handler, 'listen', []) or []
if not listeners:
continue
listeners = listening_handler.get_validated_value(
'listen', listening_handler.fattributes.get('listen'), listeners, handler_templar
)
if handler_name not in listeners:
continue
else:
found = True
if listening_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._inventory.add_dynamic_host(new_host_info, result_item)
# ensure host is available for subsequent plays
if result_item.get('changed') and new_host_info['host_name'] not in self._hosts_cache_all:
self._hosts_cache_all.append(new_host_info['host_name'])
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._inventory.add_dynamic_group(original_host, result_item)
if 'add_host' in result_item or 'add_group' in result_item:
item_vars = _get_item_vars(result_item, original_task)
found_task_vars = self._queued_task_cache.get((original_host.name, task_result._task._uuid))['task_vars']
if item_vars:
all_task_vars = combine_vars(found_task_vars, item_vars)
else:
all_task_vars = found_task_vars
all_task_vars[original_task.register] = wrap_var(result_item)
post_process_whens(result_item, original_task, handler_templar, all_task_vars)
if original_task.loop or original_task.loop_with:
new_item_result = TaskResult(
task_result._host,
task_result._task,
result_item,
task_result._task_fields,
)
self._tqm.send_callback('v2_runner_item_on_ok', new_item_result)
if result_item.get('changed', False):
task_result._result['changed'] = True
if result_item.get('failed', False):
task_result._result['failed'] = True
if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in result_item['ansible_facts'].items():
# find the host we're actually referring too here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[original_task._role.get_name()].items():
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if isinstance(original_task, Handler):
for handler in (h for b in iterator._play.handlers for h in b.block if h._uuid == original_task._uuid):
handler.remove_host(original_host)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars | included_file._vars
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
Raises AnsibleError exception in case of a failure during including a file,
in such case the caller is responsible for marking the host(s) as failed
using PlayIterator.mark_host_failed().
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleParserError:
raise
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
for r in included_file._results:
r._result['failed'] = True
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
raise AnsibleError(reason) from e
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
skip_reason = '%s conditional evaluated to False' % meta_action
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
# These don't support "when" conditionals
if meta_action in ('noop', 'refresh_inventory', 'reset_connection') and task.when:
self._cond_not_supported_warn(meta_action)
if meta_action == 'noop':
msg = "noop"
elif meta_action == 'flush_handlers':
if _evaluate_conditional(target_host):
host_state = iterator.get_state_for_host(target_host.name)
if host_state.run_state == IteratingStates.HANDLERS:
raise AnsibleError('flush_handlers cannot be used as a handler')
if target_host.name not in self._tqm._unreachable_hosts:
host_state.pre_flushing_run_state = host_state.run_state
host_state.run_state = IteratingStates.HANDLERS
msg = "triggered running handlers for %s" % target_host.name
else:
skipped = True
skip_reason += ', not running handlers for %s' % target_host.name
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator.clear_host_errors(host)
msg = "cleared host errors"
else:
skipped = True
skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_batch':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
msg = "ending batch"
else:
skipped = True
skip_reason += ', continuing current batch'
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
# end_play is used in PlaybookExecutor/TQM to indicate that
# the whole play is supposed to be ended as opposed to just a batch
iterator.end_play = True
msg = "ending play"
else:
skipped = True
skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator.set_run_state_for_host(target_host.name, IteratingStates.COMPLETE)
iterator._play._removed_hosts.append(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
skip_reason += ", continuing execution for %s" % target_host.name
# TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'role_complete':
# Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
# How would this work with allow_duplicates??
if task.implicit:
if target_host.name in task._role._had_task_run:
task._role._completed[target_host.name] = True
msg = 'role_complete for %s' % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist. This 'mostly' works here cause meta
# disregards the loop, but should not really use play_context at all
play_context.update_vars(all_vars)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
result['skip_reason'] = skip_reason
else:
result['changed'] = False
display.vv("META: %s" % msg)
if isinstance(task, Handler):
task.remove_host(target_host)
res = TaskResult(target_host, task, result)
if skipped:
self._tqm.send_callback('v2_runner_on_skipped', res)
return [res]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
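# Interactive task debugger built on cmd.Cmd. The task, task_vars, host,
# play_context and result objects are exposed in the scope used by the
# pprint/eval/exec commands below.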
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,612 |
failed task inside an include_tasks inside a rescue block does not remove host from ansible_play_hosts nor from ansible_play_batch
|
### Summary
I have created a role that has two task files: main and validation. The main task file uses block/rescue to validate the health of a system, with the rescue block trying to recover a failed system and then re-running the validation tasks. If the system still reports failure after an attempted recovery, I expect the failed host to be removed from `ansible_play_hosts` and `ansible_play_batch`; however, they persist. Interestingly, the failed host(s) are excluded from future tasks in the play, and from subsequent plays in the playbook. I was only able to reproduce this when the rescue block had a failure coming from an `include_tasks`.
### Issue Type
Bug Report
### Component Name
rescue
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.3]
config file = None
python version = 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
ubuntu 22.04
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
inventory:
```
[all]
a
b
```
roles/fail/tasks/validation.yml:
```yaml
- fail:
when: "inventory_hostname == 'a'"
```
roles/fail/tasks/main.yml:
```yaml
- block:
- include_tasks: "validation.yml"
rescue:
- debug:
msg: "Failed for {{ inventory_hostname }}"
- include_tasks: "validation.yml"
```
playbook:
```yaml
- hosts: "all"
gather_facts: false
tasks:
- include_role:
name: "fail"
- debug:
msg: "{{ item }} is: {{ lookup('vars', item) }}"
loop:
- "ansible_play_hosts_all"
- "ansible_play_hosts"
- "ansible_play_batch"
- hosts: "all"
gather_facts: false
tasks:
- debug:
msg: "second play for {{ inventory_hostname }}"
```
### Expected Results
I expected host `a` to be removed from `ansible_play_hosts` and `ansible_play_batch`
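For example, an assertion like the following (a hypothetical check, not part of the original report) captures that expectation when run on the surviving host:
```yaml
- assert:
    that:
      - "'a' not in ansible_play_hosts"
      - "'a' not in ansible_play_batch"
```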
### Actual Results
```console
results:
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [include_role : fail] ***********************************************************************************************************************************************************************************************************************************************************************************
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a, b
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
skipping: [b]
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [fail : debug] ******************************************************************************************************************************************************************************************************************************************************************************************
ok: [a] => {
"msg": "Failed for a"
}
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => (item=ansible_play_hosts_all) => {
"msg": "ansible_play_hosts_all is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_hosts) => {
"msg": "ansible_play_hosts is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_batch) => {
"msg": "ansible_play_batch is: ['a', 'b']"
}
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => {
"msg": "second play for b"
}
PLAY RECAP ***************************************************************************************************************************************************************************************************************************************************************************************************
a : ok=3 changed=0 unreachable=0 failed=1 skipped=0 rescued=1 ignored=0
b : ok=3 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78612
|
https://github.com/ansible/ansible/pull/78657
|
38c3b5e8f1cd6898b8dfe54f1849c8e424b5dd49
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
| 2022-08-22T14:16:46Z |
python
| 2022-08-30T15:16:21Z |
test/integration/targets/blocks/78612.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,612 |
failed task inside an include_tasks inside a rescue block does not remove host from ansible_play_hosts nor from ansible_play_batch
|
### Summary
I have created a role that has two task files: main and validation. The main task file uses block/rescue to validate the health of a system, with the rescue block trying to recover a failed system and then re-running the validation tasks. If the system still reports failure after an attempted recovery, I expect the failed host to be removed from `ansible_play_hosts` and `ansible_play_batch`; however, they persist. Interestingly, the failed host(s) are excluded from future tasks in the play, and from subsequent plays in the playbook. I was only able to reproduce this when the rescue block had a failure coming from an `include_tasks`.
### Issue Type
Bug Report
### Component Name
rescue
### Ansible Version
```console
$ ansible --version
ansible [core 2.13.3]
config file = None
python version = 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
# if using a version older than ansible-core 2.12 you should omit the '-t all'
$ ansible-config dump --only-changed -t all
```
### OS / Environment
ubuntu 22.04
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
inventory:
```
[all]
a
b
```
roles/fail/tasks/validation.yml:
```yaml
- fail:
when: "inventory_hostname == 'a'"
```
roles/fail/tasks/main.yml:
```yaml
- block:
- include_tasks: "validation.yml"
rescue:
- debug:
msg: "Failed for {{ inventory_hostname }}"
- include_tasks: "validation.yml"
```
playbook:
```yaml
- hosts: "all"
gather_facts: false
tasks:
- include_role:
name: "fail"
- debug:
msg: "{{ item }} is: {{ lookup('vars', item) }}"
loop:
- "ansible_play_hosts_all"
- "ansible_play_hosts"
- "ansible_play_batch"
- hosts: "all"
gather_facts: false
tasks:
- debug:
msg: "second play for {{ inventory_hostname }}"
```
### Expected Results
I expected host `a` to be removed from `ansible_play_hosts` and `ansible_play_batch`
### Actual Results
```console
results:
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [include_role : fail] ***********************************************************************************************************************************************************************************************************************************************************************************
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a, b
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
skipping: [b]
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [fail : debug] ******************************************************************************************************************************************************************************************************************************************************************************************
ok: [a] => {
"msg": "Failed for a"
}
TASK [fail : include_tasks] **********************************************************************************************************************************************************************************************************************************************************************************
included: roles/fail/tasks/validation.yml for a
TASK [fail : fail] *******************************************************************************************************************************************************************************************************************************************************************************************
fatal: [a]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => (item=ansible_play_hosts_all) => {
"msg": "ansible_play_hosts_all is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_hosts) => {
"msg": "ansible_play_hosts is: ['a', 'b']"
}
ok: [b] => (item=ansible_play_batch) => {
"msg": "ansible_play_batch is: ['a', 'b']"
}
PLAY [all] ***************************************************************************************************************************************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************************************************************************************************************************
ok: [b] => {
"msg": "second play for b"
}
PLAY RECAP ***************************************************************************************************************************************************************************************************************************************************************************************************
a : ok=3 changed=0 unreachable=0 failed=1 skipped=0 rescued=1 ignored=0
b : ok=3 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78612
|
https://github.com/ansible/ansible/pull/78657
|
38c3b5e8f1cd6898b8dfe54f1849c8e424b5dd49
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
| 2022-08-22T14:16:46Z |
python
| 2022-08-30T15:16:21Z |
test/integration/targets/blocks/runme.sh
|
#!/usr/bin/env bash
set -eux
# This test does not use "$@" to avoid further increasing the verbosity beyond what is required for the test.
# Increasing verbosity from -vv to -vvv can increase the line count from ~400 to ~9K on our centos6 test container.
# remove old output log
rm -f block_test.out
# run the test and check to make sure the right number of completions was logged
ansible-playbook -vv main.yml -i ../../inventory | tee block_test.out
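# strip ANSI color escape sequences so the completion/play counts compared below are stable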
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
rm -f block_test.out block_test_wo_colors.out
# run test with free strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=free | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
rm -f block_test.out block_test_wo_colors.out
# run test with host_pinned strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=host_pinned | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# run test that includes tasks that fail inside a block with always
rm -f block_test.out block_test_wo_colors.out
ansible-playbook -vv block_fail.yml -i ../../inventory | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
ansible-playbook -vv block_rescue_vars.yml
# https://github.com/ansible/ansible/issues/70000
set +e
exit_code=0
ansible-playbook -vv always_failure_with_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Rescue' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
set +e
exit_code=0
ansible-playbook -vv always_no_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
set +e
exit_code=0
ansible-playbook -vv always_failure_no_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/29047
ansible-playbook -vv issue29047.yml -i ../../inventory
# https://github.com/ansible/ansible/issues/61253
ansible-playbook -vv block_in_rescue.yml -i ../../inventory > rc_test.out
cat rc_test.out
[ "$(grep -c 'rescued=3' rc_test.out)" -eq 1 ]
[ "$(grep -c 'failed=0' rc_test.out)" -eq 1 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/71306
set +e
exit_code=0
ansible-playbook -i host1,host2 -vv issue71306.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 0 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/69848
ansible-playbook -i host1,host2 --tags foo -vv 69848.yml > role_complete_test.out
cat role_complete_test.out
[ "$(grep -c 'Tagged task' role_complete_test.out)" -eq 2 ]
[ "$(grep -c 'Not tagged task' role_complete_test.out)" -eq 0 ]
rm -f role_complete_test.out
# test notify inheritance
ansible-playbook inherit_notify.yml "$@"
ansible-playbook unsafe_failed_task.yml "$@"
ansible-playbook finalized_task.yml "$@"
# https://github.com/ansible/ansible/issues/72725
ansible-playbook -i host1,host2 -vv 72725.yml
# https://github.com/ansible/ansible/issues/72781
set +e
ansible-playbook -i host1,host2 -vv 72781.yml > 72781.out
set -e
cat 72781.out
[ "$(grep -c 'SHOULD NOT HAPPEN' 72781.out)" -eq 0 ]
rm -f 72781.out
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,648 |
python 3.5 broken by charset_normalizer v 2.1.0
|
### Summary
I know I should move to a newer Ubuntu/Python version, but I'm stuck with an Ubuntu 16.04 installation that uses Python 3.5.
If I try to run a playbook, it fails at the first `apt` task; the traceback shows a syntax error caused by `charset_normalizer`, which [recently (version 2.1.0) dropped support for Python 3.5](https://github.com/Ousret/charset_normalizer/releases/tag/2.1.0).
There's also an "OpenSSL" ImportError, but I still have to do a bit of research on that.
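A possible stopgap on the target host, based on the release note linked above, is to pin the dependency to the last release that still supports Python 3.5 (a sketch, not verified on this box):
```console
python3.5 -m pip install 'charset-normalizer<2.1.0'
```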
### Issue Type
Bug Report
### Component Name
apt
### Ansible Version
```console
ansible [core 2.13.3]
config file = None
configured module search path = ['/home/sanzo/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.10/site-packages/ansible
ansible collection location = /home/sanzo/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.10.6 (main, Aug 2 2022, 00:00:00) [GCC 12.1.1 20220507 (Red Hat 12.1.1-1)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
-
```
### OS / Environment
Host: Fedora 35
Target: Ubuntu 16.04 on Vagrant box
### Steps to Reproduce
```yml
- name: Install prerequisites for Docker repository
become: yes
ansible.builtin.apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg2
- software-properties-common
update_cache: yes
```
### Expected Results
The task should run without problems.
### Actual Results
```console
The full traceback is:
Traceback (most recent call last):
File "/tmp/ansible_ansible.builtin.apt_payload_cno7p5h4/ansible_ansible.builtin.apt_payload.zip/ansible/module_utils/urls.py", line 116, in <module>
File "/usr/local/lib/python3.5/dist-packages/urllib3/contrib/pyopenssl.py", line 50, in <module>
import OpenSSL.SSL
ImportError: No module named 'OpenSSL'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/vagrant/.ansible/tmp/ansible-tmp-1661458998.9793568-32605-133845591475306/AnsiballZ_apt.py", line 107, in <module>
_ansiballz_main()
File "/home/vagrant/.ansible/tmp/ansible-tmp-1661458998.9793568-32605-133845591475306/AnsiballZ_apt.py", line 99, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/home/vagrant/.ansible/tmp/ansible-tmp-1661458998.9793568-32605-133845591475306/AnsiballZ_apt.py", line 48, in invoke_module
run_name='__main__', alter_sys=True)
File "/usr/lib/python3.5/runpy.py", line 196, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib/python3.5/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmp/ansible_ansible.builtin.apt_payload_cno7p5h4/ansible_ansible.builtin.apt_payload.zip/ansible/modules/apt.py", line 368, in <module>
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 664, in _load_unlocked
File "<frozen importlib._bootstrap>", line 634, in _load_backward_compatible
File "/tmp/ansible_ansible.builtin.apt_payload_cno7p5h4/ansible_ansible.builtin.apt_payload.zip/ansible/module_utils/urls.py", line 118, in <module>
File "/usr/local/lib/python3.5/dist-packages/requests/__init__.py", line 48, in <module>
from charset_normalizer import __version__ as charset_normalizer_version
File "/usr/local/lib/python3.5/dist-packages/charset_normalizer/__init__.py", line 24, in <module>
from .api import from_bytes, from_fp, from_path, normalize
File "/usr/local/lib/python3.5/dist-packages/charset_normalizer/api.py", line 71
previous_logger_level: int = logger.level
^
SyntaxError: invalid syntax
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78648
|
https://github.com/ansible/ansible/pull/78667
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
|
f8e24e4a65a0a0d3e5a294d94c774c1ce1ac4152
| 2022-08-25T20:27:44Z |
python
| 2022-08-30T19:22:56Z |
changelogs/fragments/78648-urllib3-import-exceptions.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,648 |
python 3.5 broken by charset_normalizer v 2.1.0
|
### Summary
I know I should move to a newer Ubuntu/Python version, but I'm stuck with an Ubuntu 16.04 installation that uses Python 3.5.
If I try to run a playbook, it fails at the first `apt` task; the traceback shows a syntax error caused by `charset_normalizer`, which [recently (version 2.1.0) dropped support for Python 3.5](https://github.com/Ousret/charset_normalizer/releases/tag/2.1.0).
There's also an "OpenSSL" ImportError, but I still have to do a bit of research on that.
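The failing line is a PEP 526 annotated assignment, syntax that Python only accepts from 3.6 onward, so importing `charset_normalizer>=2.1.0` dies on 3.5. A minimal reproduction (hypothetical snippet, not taken from the library):
```python
# save as repro.py and run with python3.5
previous_logger_level: int = 0  # annotated assignment -> SyntaxError on 3.5
```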
### Issue Type
Bug Report
### Component Name
apt
### Ansible Version
```console
ansible [core 2.13.3]
config file = None
configured module search path = ['/home/sanzo/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.10/site-packages/ansible
ansible collection location = /home/sanzo/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.10.6 (main, Aug 2 2022, 00:00:00) [GCC 12.1.1 20220507 (Red Hat 12.1.1-1)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
-
```
### OS / Environment
Host: Fedora 35
Target: Ubuntu 16.04 on Vagrant box
### Steps to Reproduce
```yml
- name: Install prerequisites for Docker repository
become: yes
ansible.builtin.apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg2
- software-properties-common
update_cache: yes
```
### Expected Results
The task should run without problems.
### Actual Results
```console
The full traceback is:
Traceback (most recent call last):
File "/tmp/ansible_ansible.builtin.apt_payload_cno7p5h4/ansible_ansible.builtin.apt_payload.zip/ansible/module_utils/urls.py", line 116, in <module>
File "/usr/local/lib/python3.5/dist-packages/urllib3/contrib/pyopenssl.py", line 50, in <module>
import OpenSSL.SSL
ImportError: No module named 'OpenSSL'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/vagrant/.ansible/tmp/ansible-tmp-1661458998.9793568-32605-133845591475306/AnsiballZ_apt.py", line 107, in <module>
_ansiballz_main()
File "/home/vagrant/.ansible/tmp/ansible-tmp-1661458998.9793568-32605-133845591475306/AnsiballZ_apt.py", line 99, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/home/vagrant/.ansible/tmp/ansible-tmp-1661458998.9793568-32605-133845591475306/AnsiballZ_apt.py", line 48, in invoke_module
run_name='__main__', alter_sys=True)
File "/usr/lib/python3.5/runpy.py", line 196, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib/python3.5/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmp/ansible_ansible.builtin.apt_payload_cno7p5h4/ansible_ansible.builtin.apt_payload.zip/ansible/modules/apt.py", line 368, in <module>
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 664, in _load_unlocked
File "<frozen importlib._bootstrap>", line 634, in _load_backward_compatible
File "/tmp/ansible_ansible.builtin.apt_payload_cno7p5h4/ansible_ansible.builtin.apt_payload.zip/ansible/module_utils/urls.py", line 118, in <module>
File "/usr/local/lib/python3.5/dist-packages/requests/__init__.py", line 48, in <module>
from charset_normalizer import __version__ as charset_normalizer_version
File "/usr/local/lib/python3.5/dist-packages/charset_normalizer/__init__.py", line 24, in <module>
from .api import from_bytes, from_fp, from_path, normalize
File "/usr/local/lib/python3.5/dist-packages/charset_normalizer/api.py", line 71
previous_logger_level: int = logger.level
^
SyntaxError: invalid syntax
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78648
|
https://github.com/ansible/ansible/pull/78667
|
19e7c5b0c1d5f682d2cbbae0ba8fd5eef653ba13
|
f8e24e4a65a0a0d3e5a294d94c774c1ce1ac4152
| 2022-08-25T20:27:44Z |
python
| 2022-08-30T19:22:56Z |
lib/ansible/module_utils/urls.py
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]>, 2015
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License.
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
'''
The **urls** utils module offers a replacement for the urllib2 python library.
urllib2 is the python stdlib way to retrieve files from the Internet but it
lacks some security features (around verifying SSL certificates) that users
should care about in most situations. Using the functions in this module corrects
deficiencies in the urllib2 module wherever possible.
There are also third-party libraries (for instance, requests) which can be used
to replace urllib2 with a more secure library. However, all third party libraries
require that the library be installed on the managed machine. That is an extra step
for users making use of a module. If possible, avoid third party libraries by using
this code instead.
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import atexit
import base64
import email.mime.multipart
import email.mime.nonmultipart
import email.mime.application
import email.parser
import email.utils
import functools
import io
import mimetypes
import netrc
import os
import platform
import re
import socket
import sys
import tempfile
import traceback
import types
from contextlib import contextmanager
try:
import gzip
HAS_GZIP = True
GZIP_IMP_ERR = None
except ImportError:
HAS_GZIP = False
GZIP_IMP_ERR = traceback.format_exc()
GzipFile = object
else:
GzipFile = gzip.GzipFile # type: ignore[assignment,misc]
try:
import email.policy
except ImportError:
# Py2
import email.generator
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib # type: ignore[no-redef]
import ansible.module_utils.compat.typing as t
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.common.collections import Mapping
from ansible.module_utils.six import PY2, PY3, string_types
from ansible.module_utils.six.moves import cStringIO
from ansible.module_utils.basic import get_distribution, missing_required_lib
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
from urllib.request import AbstractHTTPHandler, BaseHandler
except ImportError:
# python2
import urllib2 as urllib_request # type: ignore[no-redef]
from urllib2 import AbstractHTTPHandler, BaseHandler # type: ignore[no-redef]
urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307 # type: ignore[attr-defined]
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse, unquote
HAS_URLPARSE = True
except Exception:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL = True
except Exception:
HAS_SSL = False
try:
# SNI Handling needs python2.7.9's SSLContext
from ssl import create_default_context, SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
# SNI Handling for python < 2.7.9 with urllib3 support
try:
# urllib3>=1.15
HAS_URLLIB3_SSL_WRAP_SOCKET = False
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
HAS_URLLIB3_PYOPENSSLCONTEXT = True
except ImportError:
# urllib3<1.15,>=1.6
HAS_URLLIB3_PYOPENSSLCONTEXT = False
try:
try:
from urllib3.contrib.pyopenssl import ssl_wrap_socket
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
HAS_URLLIB3_SSL_WRAP_SOCKET = True
except ImportError:
pass
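# NOTE: the import guards above only catch ImportError. If a transitive
# dependency of requests/urllib3 fails to import for another reason - e.g.
# the SyntaxError raised by charset_normalizer>=2.1.0 on Python 3.5 - the
# exception escapes the guard and aborts this whole module. A broader guard
# would look like this sketch (illustrative only, not necessarily the exact
# upstream fix):
#
#   try:
#       from urllib3.contrib.pyopenssl import PyOpenSSLContext
#   except Exception:  # ImportError, plus import-time failures in deps
#       HAS_URLLIB3_PYOPENSSLCONTEXT = False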
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
# If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
try:
import ctypes
import ctypes.util
except ImportError:
# python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
pass
else:
libssl_name = ctypes.util.find_library('ssl')
libssl = ctypes.CDLL(libssl_name)
for method in ('TLSv1_1_method', 'TLSv1_2_method'):
try:
libssl[method]
# Found something - we'll let openssl autonegotiate and hope
# the server has disabled sslv2 and 3. best we can do.
PROTOCOL = ssl.PROTOCOL_SSLv23
break
except AttributeError:
pass
del libssl
# The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname
# The bundled backports.ssl_match_hostname should really be moved into its own file for processing
_BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"}
LOADED_VERIFY_LOCATIONS = set() # type: t.Set[str]
HAS_MATCH_HOSTNAME = True
try:
from ssl import match_hostname, CertificateError
except ImportError:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError # type: ignore[misc]
except ImportError:
HAS_MATCH_HOSTNAME = False
HAS_CRYPTOGRAPHY = True
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import UnsupportedAlgorithm
except ImportError:
HAS_CRYPTOGRAPHY = False
# Old import for GSSAPI authentication, this is not used in urls.py but kept for backwards compatibility.
try:
import urllib_gssapi
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
GSSAPI_IMP_ERR = None
try:
import gssapi
class HTTPGSSAPIAuthHandler(BaseHandler):
""" Handles Negotiate/Kerberos support through the gssapi library. """
AUTH_HEADER_PATTERN = re.compile(r'(?:.*)\s*(Negotiate|Kerberos)\s*([^,]*),?', re.I)
handler_order = 480 # Handle before Digest authentication
def __init__(self, username=None, password=None):
self.username = username
self.password = password
self._context = None
def get_auth_value(self, headers):
auth_match = self.AUTH_HEADER_PATTERN.search(headers.get('www-authenticate', ''))
if auth_match:
return auth_match.group(1), base64.b64decode(auth_match.group(2))
def http_error_401(self, req, fp, code, msg, headers):
# If we've already attempted the auth and we've reached this again then there was a failure.
if self._context:
return
parsed = generic_urlparse(urlparse(req.get_full_url()))
auth_header = self.get_auth_value(headers)
if not auth_header:
return
auth_protocol, in_token = auth_header
username = None
if self.username:
username = gssapi.Name(self.username, name_type=gssapi.NameType.user)
if username and self.password:
if not hasattr(gssapi.raw, 'acquire_cred_with_password'):
raise NotImplementedError("Platform GSSAPI library does not support "
"gss_acquire_cred_with_password, cannot acquire GSSAPI credential with "
"explicit username and password.")
b_password = to_bytes(self.password, errors='surrogate_or_strict')
cred = gssapi.raw.acquire_cred_with_password(username, b_password, usage='initiate').creds
else:
cred = gssapi.Credentials(name=username, usage='initiate')
# Get the peer certificate for the channel binding token if possible (HTTPS). A bug on macOS causes the
# authentication to fail when the CBT is present. Just skip that platform.
cbt = None
cert = getpeercert(fp, True)
if cert and platform.system() != 'Darwin':
cert_hash = get_channel_binding_cert_hash(cert)
if cert_hash:
cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash)
# TODO: We could add another option that is set to include the port in the SPN if desired in the future.
target = gssapi.Name("HTTP@%s" % parsed['hostname'], gssapi.NameType.hostbased_service)
self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt)
resp = None
while not self._context.complete:
out_token = self._context.step(in_token)
if not out_token:
break
auth_header = '%s %s' % (auth_protocol, to_native(base64.b64encode(out_token)))
req.add_unredirected_header('Authorization', auth_header)
resp = self.parent.open(req)
# The response could contain a token that the client uses to validate the server
auth_header = self.get_auth_value(resp.headers)
if not auth_header:
break
in_token = auth_header[1]
return resp
except ImportError:
GSSAPI_IMP_ERR = traceback.format_exc()
HTTPGSSAPIAuthHandler = None # type: types.ModuleType | None # type: ignore[no-redef]
if not HAS_MATCH_HOSTNAME:
# The following block of code is under the terms and conditions of the
# Python Software Foundation License
"""The match_hostname() function from Python 3.4, essential when using SSL."""
try:
# Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not
from _ssl import SSLCertVerificationError
CertificateError = SSLCertVerificationError # type: ignore[misc]
except ImportError:
class CertificateError(ValueError): # type: ignore[no-redef]
pass
def _dnsname_match(dn, hostname):
"""Matching according to RFC 6125, section 6.4.3
- Hostnames are compared lower case.
- For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
- Partial wildcards like 'www*.example.org', multiple wildcards, sole
wildcard or wildcards in labels other than the left-most label are not
supported and a CertificateError is raised.
- A wildcard must match at least one character.
"""
if not dn:
return False
wildcards = dn.count('*')
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
if wildcards > 1:
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"too many wildcards in certificate DNS name: %s" % repr(dn))
dn_leftmost, sep, dn_remainder = dn.partition('.')
if '*' in dn_remainder:
# Only match wildcard in leftmost segment.
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"wildcard can only be present in the leftmost label: "
"%s." % repr(dn))
if not sep:
# no right side
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"sole wildcard without additional labels are not support: "
"%s." % repr(dn))
if dn_leftmost != '*':
# no partial wildcard matching
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"partial wildcards in leftmost label are not supported: "
"%s." % repr(dn))
hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
if not hostname_leftmost or not sep:
# wildcard must match at least one char
return False
return dn_remainder.lower() == hostname_remainder.lower()
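# Worked examples consistent with the rules above (illustrative):
#   _dnsname_match('*.example.org', 'www.example.org') -> True
#   _dnsname_match('*.example.org', 'example.org')     -> False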
def _inet_paton(ipname):
"""Try to convert an IP address to packed binary form
Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
support.
"""
# inet_aton() also accepts strings like '1'
# Divergence: We make sure we have native string type for all python versions
try:
b_ipname = to_bytes(ipname, errors='strict')
except UnicodeError:
raise ValueError("%s must be an all-ascii string." % repr(ipname))
# Set ipname in native string format
if sys.version_info < (3,):
n_ipname = b_ipname
else:
n_ipname = ipname
if n_ipname.count('.') == 3:
try:
return socket.inet_aton(n_ipname)
# Divergence: OSError on late python3. socket.error earlier.
# Null bytes generate ValueError on python3(we want to raise
# ValueError anyway), TypeError # earlier
except (OSError, socket.error, TypeError):
pass
try:
return socket.inet_pton(socket.AF_INET6, n_ipname)
# Divergence: OSError on late python3. socket.error earlier.
# Null bytes generate ValueError on python3(we want to raise
# ValueError anyway), TypeError # earlier
except (OSError, socket.error, TypeError):
# Divergence .format() to percent formatting for Python < 2.6
raise ValueError("%s is neither an IPv4 nor an IP6 "
"address." % repr(ipname))
except AttributeError:
# AF_INET6 not available
pass
# Divergence .format() to percent formatting for Python < 2.6
raise ValueError("%s is not an IPv4 address." % repr(ipname))
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
ip = _inet_paton(ipname.rstrip())
return ip == host_ip
def match_hostname(cert, hostname): # type: ignore[misc]
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed.
The function matches IP addresses rather than dNSNames if hostname is a
valid ipaddress string. IPv4 addresses are supported on all platforms.
IPv6 addresses are supported on platforms with IPv6 support (AF_INET6
and inet_pton).
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence: Deal with hostname as bytes
host_ip = _inet_paton(to_text(hostname, errors='strict'))
except UnicodeError:
# Divergence: Deal with hostname as byte strings.
# IP addresses should be all ascii, so we consider it not
# an IP address if this fails
host_ip = None
except ValueError:
# Not an IP address (common case)
host_ip = None
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or subjectAltName fields were found")
# End of Python Software Foundation Licensed code
HAS_MATCH_HOSTNAME = True
# This is a dummy cacert provided for macOS since you need at least 1
# ca cert, regardless of validity, for Python on macOS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
b_PEM_CERT_RE = re.compile(
br'^-----BEGIN CERTIFICATE-----\n.+?-----END CERTIFICATE-----$',
flags=re.M | re.S
)
#
# Exceptions
#
class ConnectionError(Exception):
"""Failed to connect to the server"""
pass
class ProxyError(ConnectionError):
"""Failure to connect because of a proxy"""
pass
class SSLValidationError(ConnectionError):
"""Failure to connect due to SSL validation failing"""
pass
class NoSSLError(SSLValidationError):
"""Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
pass
class MissingModuleError(Exception):
"""Failed to import 3rd party module required by the caller"""
def __init__(self, message, import_traceback, module=None):
super(MissingModuleError, self).__init__(message)
self.import_traceback = import_traceback
self.module = module
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = None
CustomHTTPSHandler = None
HTTPSClientAuthHandler = None
UnixHTTPSConnection = None
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
class CustomHTTPSConnection(httplib.HTTPSConnection): # type: ignore[no-redef]
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self.context = None
if HAS_SSLCONTEXT:
self.context = self._context
elif HAS_URLLIB3_PYOPENSSLCONTEXT:
self.context = self._context = PyOpenSSLContext(PROTOCOL)
if self.context and self.cert_file:
self.context.load_cert_chain(self.cert_file, self.key_file)
def connect(self):
"Connect to a host on a given (SSL) port."
if hasattr(self, 'source_address'):
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
else:
sock = socket.create_connection((self.host, self.port), self.timeout)
server_hostname = self.host
# Note: self._tunnel_host is not available on py < 2.6 but this code
# isn't used on py < 2.6 (lack of create_connection)
if self._tunnel_host:
self.sock = sock
self._tunnel()
server_hostname = self._tunnel_host
if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, # pylint: disable=used-before-assignment
certfile=self.cert_file, ssl_version=PROTOCOL, server_hostname=server_hostname)
else:
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
class CustomHTTPSHandler(urllib_request.HTTPSHandler): # type: ignore[no-redef]
def https_open(self, req):
kwargs = {}
if HAS_SSLCONTEXT:
kwargs['context'] = self._context
return self.do_open(
functools.partial(
CustomHTTPSConnection,
**kwargs
),
req
)
https_request = AbstractHTTPHandler.do_request_
class HTTPSClientAuthHandler(urllib_request.HTTPSHandler): # type: ignore[no-redef]
'''Handles client authentication via cert/key
This is a fairly lightweight extension on HTTPSHandler, and can be used
in place of HTTPSHandler
'''
def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs):
urllib_request.HTTPSHandler.__init__(self, **kwargs)
self.client_cert = client_cert
self.client_key = client_key
self._unix_socket = unix_socket
def https_open(self, req):
return self.do_open(self._build_https_connection, req)
def _build_https_connection(self, host, **kwargs):
kwargs.update({
'cert_file': self.client_cert,
'key_file': self.client_key,
})
try:
kwargs['context'] = self._context
except AttributeError:
pass
if self._unix_socket:
return UnixHTTPSConnection(self._unix_socket)(host, **kwargs)
return httplib.HTTPSConnection(host, **kwargs)
@contextmanager
def unix_socket_patch_httpconnection_connect():
'''Monkey patch ``httplib.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect``
so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the
correct behavior of creating self.sock for the unix socket
'''
_connect = httplib.HTTPConnection.connect
httplib.HTTPConnection.connect = UnixHTTPConnection.connect
yield
httplib.HTTPConnection.connect = _connect
class UnixHTTPSConnection(httplib.HTTPSConnection): # type: ignore[no-redef]
def __init__(self, unix_socket):
self._unix_socket = unix_socket
def connect(self):
# This method exists simply to ensure we monkeypatch
# httplib.HTTPConnection.connect to call UnixHTTPConnection.connect
with unix_socket_patch_httpconnection_connect():
# Disable pylint check for the super() call. It complains about UnixHTTPSConnection
# being a NoneType because of the initial definition above, but it won't actually
# be a NoneType when this code runs
# pylint: disable=bad-super-call
super(UnixHTTPSConnection, self).connect()
def __call__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
return self
class UnixHTTPConnection(httplib.HTTPConnection):
'''Handles http requests to a unix socket file'''
def __init__(self, unix_socket):
self._unix_socket = unix_socket
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self.sock.connect(self._unix_socket)
except OSError as e:
raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e))
if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
self.sock.settimeout(self.timeout)
def __call__(self, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
return self
class UnixHTTPHandler(urllib_request.HTTPHandler):
'''Handler for Unix urls'''
def __init__(self, unix_socket, **kwargs):
urllib_request.HTTPHandler.__init__(self, **kwargs)
self._unix_socket = unix_socket
def http_open(self, req):
return self.do_open(UnixHTTPConnection(self._unix_socket), req)
class ParseResultDottedDict(dict):
'''
A dict that acts similarly to the ParseResult named tuple from urllib
'''
def __init__(self, *args, **kwargs):
super(ParseResultDottedDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def as_list(self):
'''
Generate a list from this dict, that looks like the ParseResult named tuple
'''
return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')]
def generic_urlparse(parts):
'''
Returns a dictionary of url parts as parsed by urlparse,
but accounts for the fact that older versions of that
library do not support named attributes (ie. .netloc)
'''
generic_parts = ParseResultDottedDict()
if hasattr(parts, 'netloc'):
# urlparse is newer, just read the fields straight
# from the parts object
generic_parts['scheme'] = parts.scheme
generic_parts['netloc'] = parts.netloc
generic_parts['path'] = parts.path
generic_parts['params'] = parts.params
generic_parts['query'] = parts.query
generic_parts['fragment'] = parts.fragment
generic_parts['username'] = parts.username
generic_parts['password'] = parts.password
hostname = parts.hostname
if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc:
# Py2.6 doesn't parse IPv6 addresses correctly
hostname = parts.netloc.split(']')[0][1:].lower()
generic_parts['hostname'] = hostname
try:
port = parts.port
except ValueError:
# Py2.6 doesn't parse IPv6 addresses correctly
netloc = parts.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
if port:
port = int(port)
else:
port = None
generic_parts['port'] = port
else:
# we have to use indexes, and then parse out
# the other parts not supported by indexing
generic_parts['scheme'] = parts[0]
generic_parts['netloc'] = parts[1]
generic_parts['path'] = parts[2]
generic_parts['params'] = parts[3]
generic_parts['query'] = parts[4]
generic_parts['fragment'] = parts[5]
# get the username, password, etc.
try:
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
match = netloc_re.match(parts[1])
auth = match.group(1)
hostname = match.group(2)
port = match.group(3)
if port:
# the capture group for the port will include the ':',
# so remove it and convert the port to an integer
port = int(port[1:])
if auth:
# the capture group above includes the @, so remove it
# and then split it up based on the first ':' found
auth = auth[:-1]
username, password = auth.split(':', 1)
else:
username = password = None
generic_parts['username'] = username
generic_parts['password'] = password
generic_parts['hostname'] = hostname
generic_parts['port'] = port
except Exception:
generic_parts['username'] = None
generic_parts['password'] = None
generic_parts['hostname'] = parts[1]
generic_parts['port'] = None
return generic_parts
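# Illustrative round trip:
#   parts = generic_urlparse(urlparse('https://user:[email protected]:8443/x'))
#   parts.hostname == 'host'; parts.port == 8443; parts.username == 'user'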
def extract_pem_certs(b_data):
for match in b_PEM_CERT_RE.finditer(b_data):
yield match.group(0)
def get_response_filename(response):
url = response.geturl()
path = urlparse(url)[2]
filename = os.path.basename(path.rstrip('/')) or None
if filename:
filename = unquote(filename)
return response.headers.get_param('filename', header='content-disposition') or filename
def parse_content_type(response):
if PY2:
get_type = response.headers.gettype
get_param = response.headers.getparam
else:
get_type = response.headers.get_content_type
get_param = response.headers.get_param
content_type = (get_type() or 'application/octet-stream').split(',')[0]
main_type, sub_type = content_type.split('/')
charset = (get_param('charset') or 'utf-8').split(',')[0]
return content_type, main_type, sub_type, charset
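# e.g. a response carrying "Content-Type: text/html; charset=ISO-8859-1"
# yields ('text/html', 'text', 'html', 'ISO-8859-1'); absent headers fall
# back to application/octet-stream and utf-8.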
class GzipDecodedReader(GzipFile):
"""A file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
Largely copied from ``xmlrpclib``/``xmlrpc.client``
"""
def __init__(self, fp):
if not HAS_GZIP:
raise MissingModuleError(self.missing_gzip_error(), import_traceback=GZIP_IMP_ERR)
if PY3:
self._io = fp
else:
# Py2 ``HTTPResponse``/``addinfourl`` doesn't support all of the file object
# functionality GzipFile requires
self._io = io.BytesIO()
for block in iter(functools.partial(fp.read, 65536), b''):
self._io.write(block)
self._io.seek(0)
fp.close()
gzip.GzipFile.__init__(self, mode='rb', fileobj=self._io) # pylint: disable=non-parent-init-called
def close(self):
try:
gzip.GzipFile.close(self)
finally:
self._io.close()
@staticmethod
def missing_gzip_error():
return missing_required_lib(
'gzip',
reason='to decompress gzip encoded responses. '
'Set "decompress" to False, to prevent attempting auto decompression'
)
class RequestWithMethod(urllib_request.Request):
'''
Workaround for using DELETE/PUT/etc with urllib2
Originally contained in library/net_infrastructure/dnsmadeeasy
'''
def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True):
if headers is None:
headers = {}
self._method = method.upper()
urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
def get_method(self):
if self._method:
return self._method
else:
return urllib_request.Request.get_method(self)
def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None):
"""This is a class factory that closes over the value of
``follow_redirects`` so that the RedirectHandler class has access to
that value without having to use globals, and potentially cause problems
where ``open_url`` or ``fetch_url`` are used multiple times in a module.
"""
class RedirectHandler(urllib_request.HTTPRedirectHandler):
"""This is an implementation of a RedirectHandler to match the
functionality provided by httplib2. It will utilize the value of
``follow_redirects`` that is passed into ``RedirectHandlerFactory``
to determine how redirects should be handled in urllib2.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
if not HAS_SSLCONTEXT:
handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path)
if handler:
urllib_request._opener.add_handler(handler)
# Preserve urllib2 compatibility
if follow_redirects == 'urllib2':
return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
# Handle disabled redirects
elif follow_redirects in ['no', 'none', False]:
raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)
method = req.get_method()
# Handle non-redirect HTTP status or invalid follow_redirects
if follow_redirects in ['all', 'yes', True]:
if code < 300 or code >= 400:
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
elif follow_redirects == 'safe':
if code < 300 or code >= 400 or method not in ('GET', 'HEAD'):
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
else:
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
try:
# Python 2-3.3
data = req.get_data()
origin_req_host = req.get_origin_req_host()
except AttributeError:
# Python 3.4+
data = req.data
origin_req_host = req.origin_req_host
# Be lenient with URIs containing a space
newurl = newurl.replace(' ', '%20')
# Support redirect with payload and original headers
if code in (307, 308):
# Preserve payload and headers
headers = req.headers
else:
# Do not preserve payload and filter headers
data = None
headers = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type", "transfer-encoding"))
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if code == 303 and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if code == 302 and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
if code == 301 and method == 'POST':
method = 'GET'
return RequestWithMethod(newurl,
method=method,
headers=headers,
data=data,
origin_req_host=origin_req_host,
unverifiable=True,
)
return RedirectHandler
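# Usage sketch (mirrors the call further down in Request.open):
#   handlers.append(RedirectHandlerFactory('safe', validate_certs=True))
# 'safe' yields a handler class that only follows GET/HEAD redirects.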
def build_ssl_validation_error(hostname, port, paths, exc=None):
'''Intelligently build out the SSLValidationError based on what support
you have installed
'''
msg = [
('Failed to validate the SSL certificate for %s:%s.'
' Make sure your managed systems have a valid CA'
' certificate installed.')
]
if not HAS_SSLCONTEXT:
msg.append('If the website serving the url uses SNI you need'
' python >= 2.7.9 on your managed machine')
msg.append(' (the python executable used (%s) is version: %s)' %
(sys.executable, ''.join(sys.version.splitlines())))
if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET:
msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
' `ndg-httpsclient`, and `pyasn1` python modules')
msg.append('to perform SNI verification in python >= 2.6.')
msg.append('You can use validate_certs=False if you do'
" not need to confirm the server's identity, but this is"
' unsafe and not recommended.'
' Paths checked for this platform: %s.')
if exc:
msg.append('The exception msg was: %s.' % to_native(exc))
raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
def atexit_remove_file(filename):
if os.path.exists(filename):
try:
os.unlink(filename)
except Exception:
# just ignore if we cannot delete, things should be ok
pass
class SSLValidationHandler(urllib_request.BaseHandler):
'''
A custom handler class for SSL validation.
Based on:
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
http://techknack.net/python-urllib2-handlers/
'''
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n"
def __init__(self, hostname, port, ca_path=None):
self.hostname = hostname
self.port = port
self.ca_path = ca_path
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
cadata = bytearray()
paths_checked = []
if self.ca_path:
paths_checked = [self.ca_path]
with open(to_bytes(self.ca_path, errors='surrogate_or_strict'), 'rb') as f:
if HAS_SSLCONTEXT:
for b_pem in extract_pem_certs(f.read()):
cadata.extend(
ssl.PEM_cert_to_DER_cert(
to_native(b_pem, errors='surrogate_or_strict')
)
)
return self.ca_path, cadata, paths_checked
if not HAS_SSLCONTEXT:
paths_checked.append('/etc/ssl/certs')
system = to_text(platform.system(), errors='surrogate_or_strict')
# build a list of paths to check for .crt/.pem files
# based on the platform type
if system == u'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif system == u'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif system == u'OpenBSD':
paths_checked.append('/etc/ssl')
elif system == u'NetBSD':
ca_certs.append('/etc/openssl/certs')
elif system == u'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
elif system == u'AIX':
paths_checked.append('/var/ssl/certs')
paths_checked.append('/opt/freeware/etc/ssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_path = None
if not HAS_SSLCONTEXT:
tmp_fd, tmp_path = tempfile.mkstemp()
atexit.register(atexit_remove_file, tmp_path)
# Write the dummy ca cert if we are running on macOS
if system == u'Darwin':
if HAS_SSLCONTEXT:
cadata.extend(
ssl.PEM_cert_to_DER_cert(
to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict')
)
)
else:
os.write(tmp_fd, b_DUMMY_CA_CERT)
# Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
try:
if full_path not in LOADED_VERIFY_LOCATIONS:
with open(full_path, 'rb') as cert_file:
b_cert = cert_file.read()
if HAS_SSLCONTEXT:
try:
for b_pem in extract_pem_certs(b_cert):
cadata.extend(
ssl.PEM_cert_to_DER_cert(
to_native(b_pem, errors='surrogate_or_strict')
)
)
except Exception:
continue
else:
os.write(tmp_fd, b_cert)
os.write(tmp_fd, b'\n')
except (OSError, IOError):
pass
if HAS_SSLCONTEXT:
default_verify_paths = ssl.get_default_verify_paths()
paths_checked[:0] = [default_verify_paths.capath]
else:
os.close(tmp_fd)
return (tmp_path, cadata, paths_checked)
def validate_proxy_response(self, response, valid_codes=None):
'''
make sure we get back a valid code from the proxy
'''
valid_codes = [200] if valid_codes is None else valid_codes
try:
(http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
if int(resp_code) not in valid_codes:
raise Exception
except Exception:
raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
Detect if the 'no_proxy' environment variable is set and honor those locations.
'''
env_no_proxy = os.environ.get('no_proxy')
if env_no_proxy:
env_no_proxy = env_no_proxy.split(',')
netloc = urlparse(url).netloc
for host in env_no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# Our requested URL matches something in no_proxy, so don't
# use the proxy for this
return False
return True
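# e.g. with no_proxy='.example.com' exported, a request to
# 'https://api.example.com/x' returns False here (bypass the proxy) while
# 'https://example.org/' returns True.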
def make_context(self, cafile, cadata):
cafile = self.ca_path or cafile
if self.ca_path:
cadata = None
else:
cadata = cadata or None
if HAS_SSLCONTEXT:
context = create_default_context(cafile=cafile)
elif HAS_URLLIB3_PYOPENSSLCONTEXT:
context = PyOpenSSLContext(PROTOCOL)
else:
raise NotImplementedError('Host libraries are too old to support creating an sslcontext')
if cafile or cadata:
context.load_verify_locations(cafile=cafile, cadata=cadata)
return context
def http_request(self, req):
tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs()
# Detect if 'no_proxy' environment variable is set and if our URL is included
use_proxy = self.detect_no_proxy(req.get_full_url())
https_proxy = os.environ.get('https_proxy')
context = None
try:
context = self.make_context(tmp_ca_cert_path, cadata)
except NotImplementedError:
# We'll make do with no context below
pass
try:
if use_proxy and https_proxy:
proxy_parts = generic_urlparse(urlparse(https_proxy))
port = proxy_parts.get('port') or 443
proxy_hostname = proxy_parts.get('hostname', None)
if proxy_hostname is None or proxy_parts.get('scheme') == '':
raise ProxyError("Failed to parse https_proxy environment variable."
" Please make sure you export https proxy as 'https_proxy=<SCHEME>://<IP_ADDRESS>:<PORT>'")
s = socket.create_connection((proxy_hostname, port))
if proxy_parts.get('scheme') == 'http':
s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict'))
if proxy_parts.get('username'):
credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
s.sendall(b'\r\n')
connect_result = b""
while connect_result.find(b"\r\n\r\n") <= 0:
connect_result += s.recv(4096)
# 128 kilobytes of headers should be enough for everyone.
if len(connect_result) > 131072:
raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
self.validate_proxy_response(connect_result)
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
else:
raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s = socket.create_connection((self.hostname, self.port))
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
# ssl_s.unwrap()
s.close()
except (ssl.SSLError, CertificateError) as e:
build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
except socket.error as e:
raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))
return req
https_request = http_request
def maybe_add_ssl_handler(url, validate_certs, ca_path=None):
parsed = generic_urlparse(urlparse(url))
if parsed.scheme == 'https' and validate_certs:
if not HAS_SSL:
raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
' however this is unsafe and not recommended')
# create the SSL validation handler and
# add it to the list of handlers
return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path)
def getpeercert(response, binary_form=False):
""" Attempt to get the peer certificate of the response from urlopen. """
# The response from urllib2.open() is different across Python 2 and 3
if PY3:
socket = response.fp.raw._sock
else:
socket = response.fp._sock.fp._sock
try:
return socket.getpeercert(binary_form)
except AttributeError:
pass # Not HTTPS
def get_channel_binding_cert_hash(certificate_der):
""" Gets the channel binding app data for a TLS connection using the peer cert. """
if not HAS_CRYPTOGRAPHY:
return
# Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4
cert = x509.load_der_x509_certificate(certificate_der, default_backend())
hash_algorithm = None
try:
hash_algorithm = cert.signature_hash_algorithm
except UnsupportedAlgorithm:
pass
# If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256.
if not hash_algorithm or hash_algorithm.name in ['md5', 'sha1']:
hash_algorithm = hashes.SHA256()
digest = hashes.Hash(hash_algorithm, default_backend())
digest.update(certificate_der)
return digest.finalize()
def rfc2822_date_string(timetuple, zone='-0000'):
"""Accepts a timetuple and optional zone which defaults to ``-0000``
and returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Copied from email.utils.formatdate and modified for separate use
"""
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
timetuple[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
timetuple[0], timetuple[3], timetuple[4], timetuple[5],
zone)
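# e.g. with the stdlib time module:
#   rfc2822_date_string(time.gmtime(0)) -> 'Thu, 01 Jan 1970 00:00:00 -0000'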
class Request:
def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=False,
follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None,
ca_path=None, unredirected_headers=None, decompress=True):
"""This class works somewhat similarly to the ``Session`` class of from requests
by defining a cookiejar that an be used across requests as well as cascaded defaults that
can apply to repeated requests
For documentation of params, see ``Request.open``
>>> from ansible.module_utils.urls import Request
>>> r = Request()
>>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read()
'{\n "cookies": {\n "k1": "v1"\n }\n}\n'
>>> r = Request(url_username='user', url_password='passwd')
>>> r.open('GET', 'http://httpbin.org/basic-auth/user/passwd').read()
'{\n "authenticated": true, \n "user": "user"\n}\n'
>>> r = Request(headers=dict(foo='bar'))
>>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read()
"""
self.headers = headers or {}
if not isinstance(self.headers, dict):
raise ValueError("headers must be a dict: %r" % self.headers)
self.use_proxy = use_proxy
self.force = force
self.timeout = timeout
self.validate_certs = validate_certs
self.url_username = url_username
self.url_password = url_password
self.http_agent = http_agent
self.force_basic_auth = force_basic_auth
self.follow_redirects = follow_redirects
self.client_cert = client_cert
self.client_key = client_key
self.unix_socket = unix_socket
self.ca_path = ca_path
self.unredirected_headers = unredirected_headers
self.decompress = decompress
if isinstance(cookies, cookiejar.CookieJar):
self.cookies = cookies
else:
self.cookies = cookiejar.CookieJar()
def _fallback(self, value, fallback):
if value is None:
return fallback
return value
def open(self, method, url, data=None, headers=None, use_proxy=None,
force=None, last_mod_time=None, timeout=None, validate_certs=None,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=None, follow_redirects=None,
client_cert=None, client_key=None, cookies=None, use_gssapi=False,
unix_socket=None, ca_path=None, unredirected_headers=None, decompress=None):
"""
Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
Does not require the module environment
Returns :class:`HTTPResponse` object.
:arg method: method for the request
:arg url: URL to request
:kwarg data: (optional) bytes, or file-like object to send
in the body of the request
:kwarg headers: (optional) Dictionary of HTTP Headers to send with the
request
:kwarg use_proxy: (optional) Boolean of whether or not to use proxy
:kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header
:kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header
:kwarg timeout: (optional) How long to wait for the server to send
data before giving up, as a float
:kwarg validate_certs: (optional) Boolean that controls whether we verify
the server's TLS certificate
:kwarg url_username: (optional) String of the user to use when authenticating
:kwarg url_password: (optional) String of the password to use when authenticating
:kwarg http_agent: (optional) String of the User-Agent to use in the request
:kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
:kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
followed, see RedirectHandlerFactory for more information
:kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication.
This file can also include the key as well, and if the key is included, client_key is not required
:kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
authentication. If client_cert contains both the certificate and key, this option is not required
:kwarg cookies: (optional) CookieJar object to send with the
request
:kwarg use_gssapi: (optional) Whether to use a GSSAPI authentication handler for the request.
:kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
connection to the provided url
:kwarg ca_path: (optional) String of file system path to CA cert bundle to use
:kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
:kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
:returns: HTTPResponse. Added in Ansible 2.9
"""
method = method.upper()
if headers is None:
headers = {}
elif not isinstance(headers, dict):
raise ValueError("headers must be a dict")
headers = dict(self.headers, **headers)
use_proxy = self._fallback(use_proxy, self.use_proxy)
force = self._fallback(force, self.force)
timeout = self._fallback(timeout, self.timeout)
validate_certs = self._fallback(validate_certs, self.validate_certs)
url_username = self._fallback(url_username, self.url_username)
url_password = self._fallback(url_password, self.url_password)
http_agent = self._fallback(http_agent, self.http_agent)
force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth)
follow_redirects = self._fallback(follow_redirects, self.follow_redirects)
client_cert = self._fallback(client_cert, self.client_cert)
client_key = self._fallback(client_key, self.client_key)
cookies = self._fallback(cookies, self.cookies)
unix_socket = self._fallback(unix_socket, self.unix_socket)
ca_path = self._fallback(ca_path, self.ca_path)
unredirected_headers = self._fallback(unredirected_headers, self.unredirected_headers)
decompress = self._fallback(decompress, self.decompress)
handlers = []
if unix_socket:
handlers.append(UnixHTTPHandler(unix_socket))
ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path)
if ssl_handler and not HAS_SSLCONTEXT:
handlers.append(ssl_handler)
parsed = generic_urlparse(urlparse(url))
if parsed.scheme != 'ftp':
username = url_username
password = url_password
if username:
netloc = parsed.netloc
elif '@' in parsed.netloc:
credentials, netloc = parsed.netloc.split('@', 1)
if ':' in credentials:
username, password = credentials.split(':', 1)
else:
username = credentials
password = ''
parsed_list = parsed.as_list()
parsed_list[1] = netloc
# reconstruct url without credentials
url = urlunparse(parsed_list)
if use_gssapi:
if HTTPGSSAPIAuthHandler:
handlers.append(HTTPGSSAPIAuthHandler(username, password))
else:
imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
url='https://pypi.org/project/gssapi/')
raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
elif username and not force_basic_auth:
passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
passman.add_password(None, netloc, username, password)
# because we have put None at the start it will always
# use this username/password combination for urls
# for which `theurl` is a super-url
authhandler = urllib_request.HTTPBasicAuthHandler(passman)
digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
# create the AuthHandler
handlers.append(authhandler)
handlers.append(digest_authhandler)
elif username and force_basic_auth:
headers["Authorization"] = basic_auth_header(username, password)
else:
try:
rc = netrc.netrc(os.environ.get('NETRC'))
login = rc.authenticators(parsed.hostname)
except IOError:
login = None
if login:
username, _, password = login
if username and password:
headers["Authorization"] = basic_auth_header(username, password)
if not use_proxy:
proxyhandler = urllib_request.ProxyHandler({})
handlers.append(proxyhandler)
context = None
if HAS_SSLCONTEXT and not validate_certs:
# In 2.7.9, the default context validates certificates
context = SSLContext(ssl.PROTOCOL_SSLv23)
if ssl.OP_NO_SSLv2:
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.verify_mode = ssl.CERT_NONE
context.check_hostname = False
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key,
context=context,
unix_socket=unix_socket))
elif client_cert or unix_socket:
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key,
unix_socket=unix_socket))
if ssl_handler and HAS_SSLCONTEXT and validate_certs:
tmp_ca_path, cadata, paths_checked = ssl_handler.get_ca_certs()
try:
context = ssl_handler.make_context(tmp_ca_path, cadata)
except NotImplementedError:
pass
# pre-2.6 versions of python cannot use the custom https
# handler, since the socket class is lacking create_connection.
# Some python builds lack HTTPS support.
if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
kwargs = {}
if HAS_SSLCONTEXT:
kwargs['context'] = context
handlers.append(CustomHTTPSHandler(**kwargs))
handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path))
# add some nicer cookie handling
if cookies is not None:
handlers.append(urllib_request.HTTPCookieProcessor(cookies))
opener = urllib_request.build_opener(*handlers)
urllib_request.install_opener(opener)
data = to_bytes(data, nonstring='passthru')
request = RequestWithMethod(url, method, data)
# add the custom agent header, to help prevent issues
# with sites that block the default urllib agent string
if http_agent:
request.add_header('User-agent', http_agent)
# Cache control
# Either we directly force a cache refresh
if force:
request.add_header('cache-control', 'no-cache')
# or we do it if the original is more recent than our copy
elif last_mod_time:
tstamp = rfc2822_date_string(last_mod_time.timetuple(), 'GMT')
request.add_header('If-Modified-Since', tstamp)
# user defined headers now, which may override things we've set above
unredirected_headers = [h.lower() for h in (unredirected_headers or [])]
for header in headers:
if header.lower() in unredirected_headers:
request.add_unredirected_header(header, headers[header])
else:
request.add_header(header, headers[header])
r = urllib_request.urlopen(request, None, timeout)
if decompress and r.headers.get('content-encoding', '').lower() == 'gzip':
fp = GzipDecodedReader(r.fp)
if PY3:
r.fp = fp
# Content-Length does not match gzip decoded length
# Prevent ``r.read`` from stopping at Content-Length
r.length = None
else:
# Py2 maps ``r.read`` to ``fp.read``, create new ``addinfourl``
# object to compensate
msg = r.msg
r = urllib_request.addinfourl(
fp,
r.info(),
r.geturl(),
r.getcode()
)
r.msg = msg
return r
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('GET', url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('HEAD', url, **kwargs)
def post(self, url, data=None, **kwargs):
r"""Sends a POST request. Returns :class:`HTTPResponse` object.
:arg url: URL to request.
:kwarg data: (optional) bytes, or file-like object to send in the body of the request.
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('POST', url, data=data, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`HTTPResponse` object.
:arg url: URL to request.
:kwarg data: (optional) bytes, or file-like object to send in the body of the request.
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`HTTPResponse` object.
:arg url: URL to request.
:kwarg data: (optional) bytes, or file-like object to send in the body of the request.
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('DELETE', url, **kwargs)
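# Illustrative usage sketch for the Request class (comments only, not
# executed at import time; the URL and options are placeholders):
#
#     request = Request(timeout=5, validate_certs=True)
#     resp = request.get('https://example.com/api')
#     data = resp.read()
#
# Reusing a single Request instance applies the same defaults (timeouts,
# certificate handling, cookies) across calls.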
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None,
use_gssapi=False, unix_socket=None, ca_path=None,
unredirected_headers=None, decompress=True):
'''
Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
Does not require the module environment
'''
method = method or ('POST' if data else 'GET')
return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth, follow_redirects=follow_redirects,
client_cert=client_cert, client_key=client_key, cookies=cookies,
use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path,
unredirected_headers=unredirected_headers, decompress=decompress)
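# Illustrative usage sketch for open_url() (URL, payload, and headers are
# placeholders):
#
#     resp = open_url('https://example.com/api', method='POST',
#                     data='{"ping": "pong"}',
#                     headers={'Content-Type': 'application/json'})
#     status = resp.getcode()
#
# Unlike fetch_url() below, open_url() does not need an AnsibleModule and
# raises exceptions instead of calling fail_json().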
def prepare_multipart(fields):
"""Takes a mapping, and prepares a multipart/form-data body
:arg fields: Mapping
:returns: tuple of (content_type, body) where ``content_type`` is
the ``multipart/form-data`` ``Content-Type`` header including
``boundary`` and ``body`` is the prepared bytestring body
Payload content from a file will be base64 encoded and will include
the appropriate ``Content-Transfer-Encoding`` and ``Content-Type``
headers.
Example:
{
"file1": {
"filename": "/bin/true",
"mime_type": "application/octet-stream"
},
"file2": {
"content": "text based file content",
"filename": "fake.txt",
"mime_type": "text/plain",
},
"text_form_field": "value"
}
"""
if not isinstance(fields, Mapping):
raise TypeError(
'Mapping is required, cannot be type %s' % fields.__class__.__name__
)
m = email.mime.multipart.MIMEMultipart('form-data')
for field, value in sorted(fields.items()):
if isinstance(value, string_types):
main_type = 'text'
sub_type = 'plain'
content = value
filename = None
elif isinstance(value, Mapping):
filename = value.get('filename')
content = value.get('content')
if not any((filename, content)):
raise ValueError('at least one of filename or content must be provided')
mime = value.get('mime_type')
if not mime:
try:
mime = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
except Exception:
mime = 'application/octet-stream'
main_type, sep, sub_type = mime.partition('/')
else:
raise TypeError(
'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__
)
if not content and filename:
with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
part = email.mime.application.MIMEApplication(f.read())
del part['Content-Type']
part.add_header('Content-Type', '%s/%s' % (main_type, sub_type))
else:
part = email.mime.nonmultipart.MIMENonMultipart(main_type, sub_type)
part.set_payload(to_bytes(content))
part.add_header('Content-Disposition', 'form-data')
del part['MIME-Version']
part.set_param(
'name',
field,
header='Content-Disposition'
)
if filename:
part.set_param(
'filename',
to_native(os.path.basename(filename)),
header='Content-Disposition'
)
m.attach(part)
if PY3:
# Ensure headers are not split over multiple lines
# The HTTP policy also uses CRLF by default
b_data = m.as_bytes(policy=email.policy.HTTP)
else:
# Py2
# We cannot just call ``as_string`` since it provides no way
# to specify ``maxheaderlen``
fp = cStringIO() # cStringIO seems to be required here
# Ensure headers are not split over multiple lines
g = email.generator.Generator(fp, maxheaderlen=0)
g.flatten(m)
# ``fix_eols`` switches from ``\n`` to ``\r\n``
b_data = email.utils.fix_eols(fp.getvalue())
del m
headers, sep, b_content = b_data.partition(b'\r\n\r\n')
del b_data
if PY3:
parser = email.parser.BytesHeaderParser().parsebytes
else:
# Py2
parser = email.parser.HeaderParser().parsestr
return (
parser(headers)['content-type'], # Message converts to native strings
b_content
)
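# Illustrative usage sketch for prepare_multipart() (field names and file
# paths are placeholders):
#
#     content_type, body = prepare_multipart({
#         'file1': {'filename': '/etc/hosts', 'mime_type': 'text/plain'},
#         'text_form_field': 'value',
#     })
#     headers = {'Content-Type': content_type}
#     # ``body`` can then be passed as ``data`` to Request.open() or open_url()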
#
# Module-related functions
#
def basic_auth_header(username, password):
"""Takes a username and password and returns a byte string suitable for
using as value of an Authorization header to do basic auth.
"""
return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))
def url_argument_spec():
'''
Creates an argument spec that can be used with any module
that will be requesting content via urllib/urllib2
'''
return dict(
url=dict(type='str'),
force=dict(type='bool', default=False),
http_agent=dict(type='str', default='ansible-httpget'),
use_proxy=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
url_username=dict(type='str'),
url_password=dict(type='str', no_log=True),
force_basic_auth=dict(type='bool', default=False),
client_cert=dict(type='path'),
client_key=dict(type='path'),
use_gssapi=dict(type='bool', default=False),
)
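# Illustrative usage sketch: modules typically extend this spec with their
# own options before constructing the AnsibleModule (the extra option name
# is a placeholder):
#
#     argument_spec = url_argument_spec()
#     argument_spec.update(dest=dict(type='path'))
#     module = AnsibleModule(argument_spec=argument_spec)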
def fetch_url(module, url, data=None, headers=None, method=None,
use_proxy=None, force=False, last_mod_time=None, timeout=10,
use_gssapi=False, unix_socket=None, ca_path=None, cookies=None, unredirected_headers=None,
decompress=True):
"""Sends a request via HTTP(S) or FTP (needs the module as parameter)
:arg module: The AnsibleModule (used to get username, password, and so on).
:arg url: The url to use.
:kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers.
:kwarg method: "POST", "PUT", etc.
:kwarg use_proxy: (optional) whether or not to use proxy (Default: True)
:kwarg boolean force: If True: Do not get a cached copy (Default: False)
:kwarg last_mod_time: Default: None
:kwarg int timeout: Default: 10
:kwarg boolean use_gssapi: Default: False
:kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
connection to the provided url
:kwarg ca_path: (optional) String of file system path to CA cert bundle to use
:kwarg cookies: (optional) CookieJar object to send with the request
:kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
:kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
:returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
The **info** contains the 'status' and other meta data. When an HTTPError (status >= 400)
occurs, ``info['body']`` contains the error response data::
Example::
data={...}
resp, info = fetch_url(module,
"http://example.com",
data=module.jsonify(data),
headers={'Content-type': 'application/json'},
method="POST")
status_code = info["status"]
body = resp.read()
if status_code >= 400:
body = info['body']
"""
if not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
if not HAS_GZIP and decompress is True:
decompress = False
module.deprecate(
'%s. "decompress" has been automatically disabled to prevent a failure' % GzipDecodedReader.missing_gzip_error(),
version='2.16'
)
# ensure we use proper tempdir
old_tempdir = tempfile.tempdir
tempfile.tempdir = module.tmpdir
# Get validate_certs from the module params
validate_certs = module.params.get('validate_certs', True)
if use_proxy is None:
use_proxy = module.params.get('use_proxy', True)
username = module.params.get('url_username', '')
password = module.params.get('url_password', '')
http_agent = module.params.get('http_agent', 'ansible-httpget')
force_basic_auth = module.params.get('force_basic_auth', '')
follow_redirects = module.params.get('follow_redirects', 'urllib2')
client_cert = module.params.get('client_cert')
client_key = module.params.get('client_key')
use_gssapi = module.params.get('use_gssapi', use_gssapi)
if not isinstance(cookies, cookiejar.CookieJar):
cookies = cookiejar.LWPCookieJar()
r = None
info = dict(url=url, status=-1)
try:
r = open_url(url, data=data, headers=headers, method=method,
use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
validate_certs=validate_certs, url_username=username,
url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
follow_redirects=follow_redirects, client_cert=client_cert,
client_key=client_key, cookies=cookies, use_gssapi=use_gssapi,
unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers,
decompress=decompress)
# Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
info.update(dict((k.lower(), v) for k, v in r.info().items()))
# Don't be lossy, append header values for duplicate headers
# In Py2 there is nothing that needs done, py2 does this for us
if PY3:
temp_headers = {}
for name, value in r.headers.items():
# The same as above, lower case keys to match py2 behavior, and create more consistent results
name = name.lower()
if name in temp_headers:
temp_headers[name] = ', '.join((temp_headers[name], value))
else:
temp_headers[name] = value
info.update(temp_headers)
# parse the cookies into a nice dictionary
cookie_list = []
cookie_dict = dict()
# Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs``
# Cookies with the same path are reversed from response order.
# This code makes no assumptions about that, and accepts the order given by python
for cookie in cookies:
cookie_dict[cookie.name] = cookie.value
cookie_list.append((cookie.name, cookie.value))
info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list)
info['cookies'] = cookie_dict
# finally update the result with a message about the fetch
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
except NoSSLError as e:
distribution = get_distribution()
if distribution is not None and distribution.lower() == 'redhat':
module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e), **info)
else:
module.fail_json(msg='%s' % to_native(e), **info)
except (ConnectionError, ValueError) as e:
module.fail_json(msg=to_native(e), **info)
except MissingModuleError as e:
module.fail_json(msg=to_text(e), exception=e.import_traceback)
except urllib_error.HTTPError as e:
r = e
try:
if e.fp is None:
# Certain HTTPError objects may not have the ability to call ``.read()`` on Python 3
# This is not handled gracefully in Python 3, and instead an exception is raised from
# tempfile, due to ``urllib.response.addinfourl`` not being initialized
raise AttributeError
body = e.read()
except AttributeError:
body = ''
else:
e.close()
# Try to add exception info to the output but don't fail if we can't
try:
# Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
info.update(dict((k.lower(), v) for k, v in e.info().items()))
except Exception:
pass
info.update({'msg': to_native(e), 'body': body, 'status': e.code})
except urllib_error.URLError as e:
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
except socket.error as e:
info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
except httplib.BadStatusLine as e:
info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1))
except Exception as e:
info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
exception=traceback.format_exc())
finally:
tempfile.tempdir = old_tempdir
return r, info
def fetch_file(module, url, data=None, headers=None, method=None,
use_proxy=True, force=False, last_mod_time=None, timeout=10,
unredirected_headers=None, decompress=True):
'''Download and save a file via HTTP(S) or FTP (needs the module as parameter).
This is basically a wrapper around fetch_url().
:arg module: The AnsibleModule (used to get username, password, and so on).
:arg url: The url to use.
:kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers.
:kwarg method: "POST", "PUT", etc.
:kwarg boolean use_proxy: Default: True
:kwarg boolean force: If True: Do not get a cached copy (Default: False)
:kwarg last_mod_time: Default: None
:kwarg int timeout: Default: 10
:kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
:kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
:returns: A string, the path to the downloaded file.
'''
# download file
bufsize = 65536
parts = urlparse(url)
file_name, file_ext = os.path.splitext(os.path.basename(parts.path))
fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_name, suffix=file_ext, delete=False)
module.add_cleanup_file(fetch_temp_file.name)
try:
rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout,
unredirected_headers=unredirected_headers, decompress=decompress)
if not rsp:
module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg']))
data = rsp.read(bufsize)
while data:
fetch_temp_file.write(data)
data = rsp.read(bufsize)
fetch_temp_file.close()
except Exception as e:
module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e)))
return fetch_temp_file.name
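# Illustrative usage sketch for fetch_file() (URL is a placeholder):
#
#     path = fetch_file(module, 'https://example.com/files/archive.tar.gz')
#     # ``path`` names a temporary copy registered for cleanup via
#     # module.add_cleanup_file()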
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,604 |
ansible/docs/docsite/rst/galaxy/user_guide.rst lacks documentation for the -p parameter
|
### Summary
I was reading https://docs.ansible.com/ansible/latest/galaxy/user_guide.html and was trying to understand how to specify which path to use when installing a Galaxy collection. I guessed that the `-p` parameter was the right one, and it turned out to be correct.
It would be helpful to explain (1) all of the available parameters, (2) what they do, and (3) what their defaults are.
### Issue Type
Documentation Report
### Component Name
ansible/docs/docsite/rst/galaxy/user_guide.rst
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.12]
config file = None
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.7/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.7.10 (default, Jun 3 2021, 00:02:01) [GCC 7.3.1 20180712 (Red Hat 7.3.1-13)]
jinja version = 3.1.2
libyaml = True
```
### Configuration
```console
N/A
```
### OS / Environment
N/A
### Additional Information
N/A
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78604
|
https://github.com/ansible/ansible/pull/78649
|
4260b71cc77b7a44e061668d0d408d847f550156
|
b8abcd17a80969349e605fd3f450bf43845e4177
| 2022-08-19T23:01:32Z |
python
| 2022-09-01T18:20:45Z |
docs/docsite/rst/galaxy/user_guide.rst
|
.. _using_galaxy:
.. _ansible_galaxy:
*****************
Galaxy User Guide
*****************
:dfn:`Ansible Galaxy` refers to the `Galaxy <https://galaxy.ansible.com>`_ website, a free site for finding, downloading, and sharing community-developed roles.
Use Galaxy to jump-start your automation project with great content from the Ansible community. Galaxy provides pre-packaged units of work such as :ref:`roles <playbooks_reuse_roles>` and, new in Galaxy 3.2, :ref:`collections <collections>`.
You can find roles for provisioning infrastructure, deploying applications, and all of the tasks you do every day. The collection format provides a comprehensive package of automation that may include multiple playbooks, roles, modules, and plugins.
.. contents::
:local:
:depth: 2
.. _finding_galaxy_collections:
Finding collections on Galaxy
=============================
To find collections on Galaxy:
#. Click the :guilabel:`Search` icon in the left-hand navigation.
#. Set the filter to *collection*.
#. Set other filters and press :guilabel:`enter`.
Galaxy presents a list of collections that match your search criteria.
.. _installing_galaxy_collections:
Installing collections
======================
Installing a collection from Galaxy
-----------------------------------
.. include:: ../shared_snippets/installing_collections.txt
.. _installing_ah_collection:
Downloading a collection from Automation Hub
----------------------------------------------------
You can download collections from Automation Hub at the command line. Automation Hub content is available to subscribers only, so you must download an API token and configure your local environment to provide it before you can download collections. To download a collection from Automation Hub with the ``ansible-galaxy`` command:
1. Get your Automation Hub API token. Go to https://cloud.redhat.com/ansible/automation-hub/token/ and click :guilabel:`Load token` from the version dropdown to copy your API token.
2. Configure Red Hat Automation Hub server in the ``server_list`` option under the ``[galaxy]`` section in your :file:`ansible.cfg` file.
.. code-block:: ini
[galaxy]
server_list = automation_hub
[galaxy_server.automation_hub]
url=https://console.redhat.com/api/automation-hub/
auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
token=my_ah_token
3. Download the collection hosted in Automation Hub.
.. code-block:: bash
ansible-galaxy collection install my_namespace.my_collection
.. seealso::
`Getting started with Automation Hub <https://www.ansible.com/blog/getting-started-with-ansible-hub>`_
An introduction to Automation Hub
Installing an older version of a collection
-------------------------------------------
.. include:: ../shared_snippets/installing_older_collection.txt
Install multiple collections with a requirements file
-----------------------------------------------------
.. include:: ../shared_snippets/installing_multiple_collections.txt
Downloading a collection for offline use
-----------------------------------------
.. include:: ../shared_snippets/download_tarball_collections.txt
Installing a collection from source files
-----------------------------------------
.. include:: ../shared_snippets/installing_collections_file.rst
Installing a collection from a git repository
---------------------------------------------
.. include:: ../shared_snippets/installing_collections_git_repo.txt
Listing installed collections
-----------------------------
To list installed collections, run ``ansible-galaxy collection list``. See :ref:`collections_listing` for more details.
Configuring the ``ansible-galaxy`` client
------------------------------------------
.. include:: ../shared_snippets/galaxy_server_list.txt
.. _finding_galaxy_roles:
Finding roles on Galaxy
=======================
Search the Galaxy database by tags, platforms, author and multiple keywords. For example:
.. code-block:: bash
$ ansible-galaxy search elasticsearch --author geerlingguy
The search command will return a list of the first 1000 results matching your search:
.. code-block:: text
Found 2 roles matching your search:
Name Description
---- -----------
geerlingguy.elasticsearch Elasticsearch for Linux.
geerlingguy.elasticsearch-curator Elasticsearch curator for Linux.
Get more information about a role
---------------------------------
Use the ``info`` command to view more detail about a specific role:
.. code-block:: bash
$ ansible-galaxy info username.role_name
This returns everything found in Galaxy for the role:
.. code-block:: text
Role: username.role_name
description: Installs and configures a thing, a distributed, highly available NoSQL thing.
active: True
commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57
commit_message: Adding travis
commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab
company: My Company, Inc.
created: 2015-12-08T14:17:52.773Z
download_count: 1
forks_count: 0
github_branch:
github_repo: repo_name
github_user: username
id: 6381
is_valid: True
issue_tracker_url:
license: Apache
min_ansible_version: 1.4
modified: 2015-12-08T18:43:49.085Z
namespace: username
open_issues_count: 0
path: /Users/username/projects/roles
scm: None
src: username.repo_name
stargazers_count: 0
travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=main
version:
watchers_count: 1
.. _installing_galaxy_roles:
Installing roles from Galaxy
============================
The ``ansible-galaxy`` command comes bundled with Ansible, and you can use it to install roles from Galaxy or directly from a git-based SCM. You can
also use it to create a new role, remove roles, or perform tasks on the Galaxy website.
The command line tool by default communicates with the Galaxy website API using the server address *https://galaxy.ansible.com*. If you run your own internal Galaxy server
and want to use it instead of the default one, pass the ``--server`` option followed by the address of that Galaxy server. You can set this option permanently by setting
the Galaxy server value in your ``ansible.cfg`` file. For information on setting the value in *ansible.cfg*, see :ref:`galaxy_server`.
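As a rough sketch, a permanent server override in ``ansible.cfg`` could look like the following (the URL is a placeholder):

.. code-block:: ini

   [galaxy]
   server = https://galaxy.example.com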
Installing roles
----------------
Use the ``ansible-galaxy`` command to download roles from the `Galaxy website <https://galaxy.ansible.com>`_
.. code-block:: bash
$ ansible-galaxy install namespace.role_name
Setting where to install roles
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default, Ansible downloads roles to the first writable directory in the default list of paths ``~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles``. This installs roles in the home directory of the user running ``ansible-galaxy``.
You can override this with one of the following options:
* Set the environment variable :envvar:`ANSIBLE_ROLES_PATH` in your session.
* Use the ``--roles-path`` option for the ``ansible-galaxy`` command.
* Define ``roles_path`` in an ``ansible.cfg`` file.
The following provides an example of using ``--roles-path`` to install the role into the current working directory:
.. code-block:: bash
$ ansible-galaxy install --roles-path . geerlingguy.apache
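The short option ``-p`` is an alias for ``--roles-path``. As an illustrative sketch (reusing the role above), the following installs into a local ``roles`` directory:

.. code-block:: bash

   $ ansible-galaxy install -p ./roles geerlingguy.apache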
.. seealso::
:ref:`intro_configuration`
All about configuration files
Installing a specific version of a role
---------------------------------------
When the Galaxy server imports a role, it imports any git tags matching the `Semantic Version <https://semver.org/>`_ format as versions.
In turn, you can download a specific version of a role by specifying one of the imported tags.
To see the available versions for a role:
#. Locate the role on the Galaxy search page.
#. Click on the name to view more details, including the available versions.
You can also navigate directly to the role using the URL path /<namespace>/<role name>. For example, to view the role geerlingguy.apache, go to `<https://galaxy.ansible.com/geerlingguy/apache>`_.
To install a specific version of a role from Galaxy, append a comma and the value of a GitHub release tag. For example:
.. code-block:: bash
$ ansible-galaxy install geerlingguy.apache,1.0.0
It is also possible to point directly to the git repository and specify a branch name or commit hash as the version. For example, the following will
install a specific commit:
.. code-block:: bash
$ ansible-galaxy install git+https://github.com/geerlingguy/ansible-role-apache.git,0b7cd353c0250e87a26e0499e59e7fd265cc2f25
Installing multiple roles from a file
-------------------------------------
You can install multiple roles by including the roles in a :file:`requirements.yml` file. The format of the file is YAML, and the
file extension must be either *.yml* or *.yaml*.
Use the following command to install roles included in :file:`requirements.yml:`
.. code-block:: bash
$ ansible-galaxy install -r requirements.yml
Again, the extension is important. If the *.yml* extension is left off, the ``ansible-galaxy`` CLI assumes the file is in an older, now deprecated,
"basic" format.
Each role in the file will have one or more of the following attributes:
src
The source of the role. Use the format *namespace.role_name*, if downloading from Galaxy; otherwise, provide a URL pointing
to a repository within a git based SCM. See the examples below. This is a required attribute.
scm
Specify the SCM. As of this writing only *git* or *hg* are allowed. See the examples below. Defaults to *git*.
version
The version of the role to download. Provide a release tag value, commit hash, or branch name. Defaults to the branch set as a default in the repository, otherwise to *master*.
name
Download the role to a specific name. Defaults to the Galaxy name when downloading from Galaxy, otherwise it defaults
to the name of the repository.
Use the following example as a guide for specifying roles in *requirements.yml*:
.. code-block:: yaml
# from galaxy
- name: yatesr.timezone
# from locally cloned git repository (git+file:// requires full paths)
- src: git+file:///home/bennojoy/nginx
# from GitHub
- src: https://github.com/bennojoy/nginx
# from GitHub, overriding the name and specifying a specific tag
- name: nginx_role
src: https://github.com/bennojoy/nginx
version: main
# from GitHub, specifying a specific commit hash
- src: https://github.com/bennojoy/nginx
version: "ee8aa41"
# from a webserver, where the role is packaged in a tar.gz
- name: http-role-gz
src: https://some.webserver.example.com/files/main.tar.gz
# from a webserver, where the role is packaged in a tar.bz2
- name: http-role-bz2
src: https://some.webserver.example.com/files/main.tar.bz2
# from a webserver, where the role is packaged in a tar.xz (Python 3.x only)
- name: http-role-xz
src: https://some.webserver.example.com/files/main.tar.xz
# from Bitbucket
- src: git+https://bitbucket.org/willthames/git-ansible-galaxy
version: v1.4
# from Bitbucket, alternative syntax and caveats
- src: https://bitbucket.org/willthames/hg-ansible-galaxy
scm: hg
# from GitLab or other git-based scm, using git+ssh
- src: [email protected]:mygroup/ansible-core.git
scm: git
version: "0.1" # quoted, so YAML doesn't parse this as a floating-point value
.. warning::
Embedding credentials into an SCM URL is not secure. Use safe authentication options instead. For example, use `SSH <https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh>`_, `netrc <https://linux.die.net/man/5/netrc>`_ or `http.extraHeader <https://git-scm.com/docs/git-config#Documentation/git-config.txt-httpextraHeader>`_/`url.<base>.pushInsteadOf <https://git-scm.com/docs/git-config#Documentation/git-config.txt-urlltbasegtpushInsteadOf>`_ in Git config to prevent your credentials from being exposed in logs.
Installing roles and collections from the same requirements.yml file
---------------------------------------------------------------------
You can install roles and collections from the same requirements file:
.. code-block:: yaml
---
roles:
# Install a role from Ansible Galaxy.
- name: geerlingguy.java
version: 1.9.6
collections:
# Install a collection from Ansible Galaxy.
- name: geerlingguy.php_roles
version: 0.9.3
source: https://galaxy.ansible.com
Installing multiple roles from multiple files
---------------------------------------------
For large projects, the ``include`` directive in a :file:`requirements.yml` file provides the ability to split a large file into multiple smaller files.
For example, a project may have a :file:`requirements.yml` file, and a :file:`webserver.yml` file.
Below are the contents of the :file:`webserver.yml` file:
.. code-block:: yaml
# from github
- src: https://github.com/bennojoy/nginx
# from Bitbucket
- src: git+https://bitbucket.org/willthames/git-ansible-galaxy
version: v1.4
The following shows the contents of the :file:`requirements.yml` file that now includes the :file:`webserver.yml` file:
.. code-block:: yaml
# from galaxy
- name: yatesr.timezone
- include: <path_to_requirements>/webserver.yml
To install all the roles from both files, pass the root file, in this case :file:`requirements.yml` on the
command line, as follows:
.. code-block:: bash
$ ansible-galaxy install -r requirements.yml
.. _galaxy_dependencies:
Dependencies
------------
Roles can also be dependent on other roles, and when you install a role that has dependencies, those dependencies will automatically be installed to the ``roles_path``.
There are two ways to define the dependencies of a role:
* using ``meta/requirements.yml``
* using ``meta/main.yml``
Using ``meta/requirements.yml``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. versionadded:: 2.10
You can create the file ``meta/requirements.yml`` and define dependencies in the same format used for :file:`requirements.yml` described in the `Installing multiple roles from a file`_ section.
From there, you can import or include the specified roles in your tasks.
Using ``meta/main.yml``
^^^^^^^^^^^^^^^^^^^^^^^
Alternatively, you can specify role dependencies in the ``meta/main.yml`` file by providing a list of roles under the ``dependencies`` section. If the source of a role is Galaxy, you can simply specify the role in
the format ``namespace.role_name``. You can also use the more complex format in :file:`requirements.yml`, allowing you to provide ``src``, ``scm``, ``version``, and ``name``.
Dependencies installed that way, depending on other factors described below, will also run **before** this role during play execution.
To better understand how dependencies are handled during play execution, see :ref:`playbooks_reuse_roles`.
The following shows an example ``meta/main.yml`` file with dependent roles:
.. code-block:: yaml
---
dependencies:
- geerlingguy.java
galaxy_info:
author: geerlingguy
description: Elasticsearch for Linux.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: 2.4
platforms:
- name: EL
versions:
- all
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
galaxy_tags:
- web
- system
- monitoring
- logging
- lucene
- elk
- elasticsearch
Tags are inherited *down* the dependency chain. In order for tags to be applied to a role and all its dependencies, the tag should be applied to the role, not to all the tasks within a role.
Roles listed as dependencies are subject to conditionals and tag filtering, and may not execute fully depending on
what tags and conditionals are applied.
If the source of a role is Galaxy, specify the role in the format *namespace.role_name*:
.. code-block:: yaml
dependencies:
- geerlingguy.apache
- geerlingguy.ansible
Alternately, you can specify the role dependencies in the complex form used in :file:`requirements.yml` as follows:
.. code-block:: yaml
dependencies:
- name: geerlingguy.ansible
- name: composer
src: git+https://github.com/geerlingguy/ansible-role-composer.git
version: 775396299f2da1f519f0d8885022ca2d6ee80ee8
.. note::
Galaxy expects all role dependencies to exist in Galaxy, and therefore dependencies to be specified in the
``namespace.role_name`` format. If you import a role with a dependency where the ``src`` value is a URL, the import process will fail.
List installed roles
--------------------
Use ``list`` to show the name and version of each role installed in the *roles_path*.
.. code-block:: bash
$ ansible-galaxy list
- ansible-network.network-engine, v2.7.2
- ansible-network.config_manager, v2.6.2
- ansible-network.cisco_nxos, v2.7.1
- ansible-network.vyos, v2.7.3
- ansible-network.cisco_ios, v2.7.0
Remove an installed role
------------------------
Use ``remove`` to delete a role from *roles_path*:
.. code-block:: bash
$ ansible-galaxy remove namespace.role_name
.. seealso::
:ref:`collections`
Shareable collections of modules, playbooks and roles
:ref:`playbooks_reuse_roles`
Reusable tasks, handlers, and other files in a known directory structure
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,559 |
Add a section about boolean variables
|
### Summary
Ansible accepts a broad range of values for boolean variables: `true/false`, `1/0`, `yes/no`, `True/False`, and so on. We recently reached consensus about representing all Boolean values as `true/false` in the module documentation generated from python docstrings - see [this vote](https://github.com/ansible-community/community-topics/discussions/120) and [this issue](https://github.com/ansible-community/community-topics/issues/116) and [this PR](https://github.com/ansible-community/antsibull-docs/pull/19). We plan to update examples and other mentions of boolean values in the docs over the next few months.
The docs should also show users that they are not required to use `true/false` if they prefer another accepted form. Adding a section to the [documentation on variables](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#playbooks-variables) that explains all this will help users.
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/user_guide/playbooks_variables.rst
### Ansible Version
```console
N/A
```
### Configuration
```console
N/A
```
### OS / Environment
N/A
### Additional Information
None
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78559
|
https://github.com/ansible/ansible/pull/78653
|
5f2bb2ba93293f43df4b4134b428de4abbd8d0dd
|
077e1bfe17b99d805dfa94ad9f67e2384077808c
| 2022-08-16T15:27:47Z |
python
| 2022-09-01T18:28:03Z |
docs/docsite/rst/playbook_guide/playbooks_variables.rst
|
.. _playbooks_variables:
***************
Using Variables
***************
Ansible uses variables to manage differences between systems. With Ansible, you can execute tasks and playbooks on multiple different systems with a single command. To represent the variations among those different systems, you can create variables with standard YAML syntax, including lists and dictionaries. You can define these variables in your playbooks, in your :ref:`inventory <intro_inventory>`, in re-usable :ref:`files <playbooks_reuse>` or :ref:`roles <playbooks_reuse_roles>`, or at the command line. You can also create variables during a playbook run by registering the return value or values of a task as a new variable.
After you create variables, either by defining them in a file, passing them at the command line, or registering the return value or values of a task as a new variable, you can use those variables in module arguments, in :ref:`conditional "when" statements <playbooks_conditionals>`, in :ref:`templates <playbooks_templating>`, and in :ref:`loops <playbooks_loops>`. The `ansible-examples github repository <https://github.com/ansible/ansible-examples>`_ contains many examples of using variables in Ansible.
Once you understand the concepts and examples on this page, read about :ref:`Ansible facts <vars_and_facts>`, which are variables you retrieve from remote systems.
.. contents::
:local:
.. _valid_variable_names:
Creating valid variable names
=============================
Not all strings are valid Ansible variable names. A variable name can only include letters, numbers, and underscores. `Python keywords`_ or :ref:`playbook keywords<playbook_keywords>` are not valid variable names. A variable name cannot begin with a number.
Variable names can begin with an underscore. In many programming languages, variables that begin with an underscore are private. This is not true in Ansible. Variables that begin with an underscore are treated exactly the same as any other variable. Do not rely on this convention for privacy or security.
This table gives examples of valid and invalid variable names:
.. table::
:class: documentation-table
====================== ====================================================================
Valid variable names Not valid
====================== ====================================================================
``foo`` ``*foo``, `Python keywords`_ such as ``async`` and ``lambda``
``foo_env`` :ref:`playbook keywords<playbook_keywords>` such as ``environment``
``foo_port`` ``foo-port``, ``foo port``, ``foo.port``
``foo5``, ``_foo`` ``5foo``, ``12``
====================== ====================================================================
.. _Python keywords: https://docs.python.org/3/reference/lexical_analysis.html#keywords
Simple variables
================
Simple variables combine a variable name with a single value. You can use this syntax (and the syntax for lists and dictionaries shown below) in a variety of places. For details about setting variables in inventory, in playbooks, in reusable files, in roles, or at the command line, see :ref:`setting_variables`.
Defining simple variables
-------------------------
You can define a simple variable using standard YAML syntax. For example:
.. code-block:: text
remote_install_path: /opt/my_app_config
Referencing simple variables
----------------------------
After you define a variable, use Jinja2 syntax to reference it. Jinja2 variables use double curly braces. For example, the expression ``My amp goes to {{ max_amp_value }}`` demonstrates the most basic form of variable substitution. You can use Jinja2 syntax in playbooks. For example:
.. code-block:: yaml+jinja
ansible.builtin.template:
src: foo.cfg.j2
dest: '{{ remote_install_path }}/foo.cfg'
In this example, the variable defines the location of a file, which can vary from one system to another.
.. note::
Ansible allows Jinja2 loops and conditionals in :ref:`templates <playbooks_templating>` but not in playbooks. You cannot create a loop of tasks. Ansible playbooks are pure machine-parseable YAML.
.. _yaml_gotchas:
When to quote variables (a YAML gotcha)
=======================================
If you start a value with ``{{ foo }}``, you must quote the whole expression to create valid YAML syntax. If you do not quote the whole expression, the YAML parser cannot interpret the syntax - it might be a variable or it might be the start of a YAML dictionary. For guidance on writing YAML, see the :ref:`yaml_syntax` documentation.
If you use a variable without quotes like this:
.. code-block:: yaml+jinja
- hosts: app_servers
vars:
app_path: {{ base_path }}/22
You will see: ``ERROR! Syntax Error while loading YAML.`` If you add quotes, Ansible works correctly:
.. code-block:: yaml+jinja
- hosts: app_servers
vars:
app_path: "{{ base_path }}/22"
.. _list_variables:
List variables
==============
A list variable combines a variable name with multiple values. The multiple values can be stored as an itemized block list or inline in square brackets ``[]``, separated by commas.
Defining variables as lists
---------------------------
You can define variables with multiple values using YAML lists. For example:
.. code-block:: yaml
region:
- northeast
- southeast
- midwest
Referencing list variables
--------------------------
When you use variables defined as a list (also called an array), you can use individual, specific fields from that list. The first item in a list is item 0, the second item is item 1. For example:
.. code-block:: yaml+jinja
region: "{{ region[0] }}"
The value of this expression would be "northeast".
.. _dictionary_variables:
Dictionary variables
====================
A dictionary stores the data in key-value pairs. Usually, dictionaries are used to store related data, such as the information contained in an ID or a user profile.
Defining variables as key:value dictionaries
--------------------------------------------
You can define more complex variables using YAML dictionaries. A YAML dictionary maps keys to values. For example:
.. code-block:: yaml
foo:
field1: one
field2: two
Referencing key:value dictionary variables
------------------------------------------
When you use variables defined as a key:value dictionary (also called a hash), you can use individual, specific fields from that dictionary using either bracket notation or dot notation:
.. code-block:: yaml
foo['field1']
foo.field1
Both of these examples reference the same value ("one"). Bracket notation always works. Dot notation can cause problems because some keys collide with attributes and methods of python dictionaries. Use bracket notation if you use keys which start and end with two underscores (which are reserved for special meanings in python) or are any of the known public attributes:
``add``, ``append``, ``as_integer_ratio``, ``bit_length``, ``capitalize``, ``center``, ``clear``, ``conjugate``, ``copy``, ``count``, ``decode``, ``denominator``, ``difference``, ``difference_update``, ``discard``, ``encode``, ``endswith``, ``expandtabs``, ``extend``, ``find``, ``format``, ``fromhex``, ``fromkeys``, ``get``, ``has_key``, ``hex``, ``imag``, ``index``, ``insert``, ``intersection``, ``intersection_update``, ``isalnum``, ``isalpha``, ``isdecimal``, ``isdigit``, ``isdisjoint``, ``is_integer``, ``islower``, ``isnumeric``, ``isspace``, ``issubset``, ``issuperset``, ``istitle``, ``isupper``, ``items``, ``iteritems``, ``iterkeys``, ``itervalues``, ``join``, ``keys``, ``ljust``, ``lower``, ``lstrip``, ``numerator``, ``partition``, ``pop``, ``popitem``, ``real``, ``remove``, ``replace``, ``reverse``, ``rfind``, ``rindex``, ``rjust``, ``rpartition``, ``rsplit``, ``rstrip``, ``setdefault``, ``sort``, ``split``, ``splitlines``, ``startswith``, ``strip``, ``swapcase``, ``symmetric_difference``, ``symmetric_difference_update``, ``title``, ``translate``, ``union``, ``update``, ``upper``, ``values``, ``viewitems``, ``viewkeys``, ``viewvalues``, ``zfill``.
.. _registered_variables:
Registering variables
=====================
You can create variables from the output of an Ansible task with the task keyword ``register``. You can use registered variables in any later tasks in your play. For example:
.. code-block:: yaml
- hosts: web_servers
tasks:
- name: Run a shell command and register its output as a variable
ansible.builtin.shell: /usr/bin/foo
register: foo_result
ignore_errors: true
- name: Run a shell command using output of the previous task
ansible.builtin.shell: /usr/bin/bar
when: foo_result.rc == 5
For more examples of using registered variables in conditions on later tasks, see :ref:`playbooks_conditionals`. Registered variables may be simple variables, list variables, dictionary variables, or complex nested data structures. The documentation for each module includes a ``RETURN`` section describing the return values for that module. To see the values for a particular task, run your playbook with ``-v``.
Registered variables are stored in memory. You cannot cache registered variables for use in future playbook runs. Registered variables are only valid on the host for the rest of the current playbook run, including subsequent plays within the same playbook run.
Registered variables are host-level variables. When you register a variable in a task with a loop, the registered variable contains a value for each item in the loop. The data structure placed in the variable during the loop will contain a ``results`` attribute, which is a list of all responses from the module. For a more in-depth example of how this works, see the :ref:`playbooks_loops` section on using register with a loop.
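As an illustrative sketch (task names and loop items are hypothetical), you can register inside a loop and then iterate over the ``results`` list in a later task:

.. code-block:: yaml

   - name: Run a command for each item
     ansible.builtin.command: "echo {{ item }}"
     loop:
       - one
       - two
     register: echo_results

   - name: Show each command's output
     ansible.builtin.debug:
       msg: "{{ item.stdout }}"
     loop: "{{ echo_results.results }}"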
.. note:: If a task fails or is skipped, Ansible still registers a variable with a failure or skipped status, unless the task is skipped based on tags. See :ref:`tags` for information on adding and using tags.
.. _accessing_complex_variable_data:
Referencing nested variables
============================
Many registered variables (and :ref:`facts <vars_and_facts>`) are nested YAML or JSON data structures. You cannot access values from these nested data structures with the simple ``{{ foo }}`` syntax. You must use either bracket notation or dot notation. For example, to reference an IP address from your facts using the bracket notation:
.. code-block:: yaml+jinja
{{ ansible_facts["eth0"]["ipv4"]["address"] }}
To reference an IP address from your facts using the dot notation:
.. code-block:: yaml+jinja
{{ ansible_facts.eth0.ipv4.address }}
.. _about_jinja2:
.. _jinja2_filters:
Transforming variables with Jinja2 filters
==========================================
Jinja2 filters let you transform the value of a variable within a template expression. For example, the ``capitalize`` filter capitalizes any value passed to it; the ``to_yaml`` and ``to_json`` filters change the format of your variable values. Jinja2 includes many `built-in filters <https://jinja.palletsprojects.com/templates/#builtin-filters>`_ and Ansible supplies many more filters. To find more examples of filters, see :ref:`playbooks_filters`.
.. _setting_variables:
Where to set variables
======================
You can define variables in a variety of places, such as in inventory, in playbooks, in reusable files, in roles, and at the command line. Ansible loads every possible variable it finds, then chooses the variable to apply based on :ref:`variable precedence rules <ansible_variable_precedence>`.
.. _define_variables_in_inventory:
Defining variables in inventory
-------------------------------
You can define different variables for each individual host, or set shared variables for a group of hosts in your inventory. For example, if all machines in the ``[Boston]`` group use 'boston.ntp.example.com' as an NTP server, you can set a group variable. The :ref:`intro_inventory` page has details on setting :ref:`host variables <host_variables>` and :ref:`group variables <group_variables>` in inventory.
.. _playbook_variables:
Defining variables in a play
----------------------------
You can define variables directly in a playbook play:
.. code-block:: yaml
- hosts: webservers
vars:
http_port: 80
When you define variables in a play, they are only visible to tasks executed in that play.
.. _included_variables:
.. _variable_file_separation_details:
Defining variables in included files and roles
----------------------------------------------
You can define variables in reusable variables files and/or in reusable roles. When you define variables in reusable variable files, the sensitive variables are separated from playbooks. This separation enables you to store your playbooks in a source control software and even share the playbooks, without the risk of exposing passwords or other sensitive and personal data. For information about creating reusable files and roles, see :ref:`playbooks_reuse`.
This example shows how you can include variables defined in an external file:
.. code-block:: yaml
---
- hosts: all
remote_user: root
vars:
favcolor: blue
vars_files:
- /vars/external_vars.yml
tasks:
- name: This is just a placeholder
ansible.builtin.command: /bin/echo foo
The contents of each variables file is a simple YAML dictionary. For example:
.. code-block:: yaml
---
# in the above example, this would be vars/external_vars.yml
somevar: somevalue
password: magic
.. note::
You can keep per-host and per-group variables in similar files. To learn about organizing your variables, see :ref:`splitting_out_vars`.
.. _passing_variables_on_the_command_line:
Defining variables at runtime
-----------------------------
You can define variables when you run your playbook by passing variables at the command line using the ``--extra-vars`` (or ``-e``) argument. You can also request user input with a ``vars_prompt`` (see :ref:`playbooks_prompts`). When you pass variables at the command line, use a single quoted string that contains one or more variables, in one of the formats below.
key=value format
^^^^^^^^^^^^^^^^
Values passed in using the ``key=value`` syntax are interpreted as strings. Use the JSON format if you need to pass non-string values such as Booleans, integers, floats, lists, and so on.
.. code-block:: text
ansible-playbook release.yml --extra-vars "version=1.23.45 other_variable=foo"
JSON string format
^^^^^^^^^^^^^^^^^^
.. code-block:: shell
ansible-playbook release.yml --extra-vars '{"version":"1.23.45","other_variable":"foo"}'
ansible-playbook arcade.yml --extra-vars '{"pacman":"mrs","ghosts":["inky","pinky","clyde","sue"]}'
When passing variables with ``--extra-vars``, you must escape quotes and other special characters appropriately for both your markup (for example, JSON), and for your shell:
.. code-block:: shell
ansible-playbook arcade.yml --extra-vars "{\"name\":\"Conan O\'Brien\"}"
ansible-playbook arcade.yml --extra-vars '{"name":"Conan O'\\\''Brien"}'
ansible-playbook script.yml --extra-vars "{\"dialog\":\"He said \\\"I just can\'t get enough of those single and double-quotes"\!"\\\"\"}"
If you have a lot of special characters, use a JSON or YAML file containing the variable definitions.
vars from a JSON or YAML file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: text
ansible-playbook release.yml --extra-vars "@some_file.json"
.. _ansible_variable_precedence:
Variable precedence: Where should I put a variable?
===================================================
You can set multiple variables with the same name in many different places. When you do this, Ansible loads every possible variable it finds, then chooses the variable to apply based on variable precedence. In other words, the different variables will override each other in a certain order.
Teams and projects that agree on guidelines for defining variables (where to define certain types of variables) usually avoid variable precedence concerns. We suggest that you define each variable in one place: figure out where to define a variable, and keep it simple. For examples, see :ref:`variable_examples`.
Some behavioral parameters that you can set in variables you can also set in Ansible configuration, as command-line options, and using playbook keywords. For example, you can define the user Ansible uses to connect to remote devices as a variable with ``ansible_user``, in a configuration file with ``DEFAULT_REMOTE_USER``, as a command-line option with ``-u``, and with the playbook keyword ``remote_user``. If you define the same parameter in a variable and by another method, the variable overrides the other setting. This approach allows host-specific settings to override more general settings. For examples and more details on the precedence of these various settings, see :ref:`general_precedence_rules`.
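For example, a host-specific ``ansible_user`` defined in inventory (the hostname below is a placeholder) overrides a ``remote_user`` playbook keyword for that host:

.. code-block:: ini

   web1.example.com ansible_user=deploy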
Understanding variable precedence
---------------------------------
Ansible does apply variable precedence, and you might have a use for it. Here is the order of precedence from least to greatest (the last listed variables override all other variables):
#. command line values (for example, ``-u my_user``, these are not variables)
#. role defaults (defined in role/defaults/main.yml) [1]_
#. inventory file or script group vars [2]_
#. inventory group_vars/all [3]_
#. playbook group_vars/all [3]_
#. inventory group_vars/* [3]_
#. playbook group_vars/* [3]_
#. inventory file or script host vars [2]_
#. inventory host_vars/* [3]_
#. playbook host_vars/* [3]_
#. host facts / cached set_facts [4]_
#. play vars
#. play vars_prompt
#. play vars_files
#. role vars (defined in role/vars/main.yml)
#. block vars (only for tasks in block)
#. task vars (only for the task)
#. include_vars
#. set_facts / registered vars
#. role (and include_role) params
#. include params
#. extra vars (for example, ``-e "user=my_user"``) (always win precedence)
In general, Ansible gives precedence to variables that were defined more recently, more actively, and with more explicit scope. Variables in the defaults folder inside a role are easily overridden. Anything in the vars directory of the role overrides previous versions of that variable in the namespace. Host and/or inventory variables override role defaults, but explicit includes such as the vars directory or an ``include_vars`` task override inventory variables.
Ansible merges different variables set in inventory so that more specific settings override more generic settings. For example, ``ansible_ssh_user`` specified as a group_var is overridden by ``ansible_user`` specified as a host_var. For details about the precedence of variables set in inventory, see :ref:`how_we_merge`.
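As an illustrative sketch (file names and values are placeholders), a host_var overrides the equivalent group_var for that host:

.. code-block:: yaml

   # group_vars/webservers.yml
   ansible_user: generic_user

   # host_vars/web1.example.com.yml
   ansible_user: override_user  # wins for web1.example.com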
.. rubric:: Footnotes
.. [1] Tasks in each role see their own role's defaults. Tasks defined outside of a role see the last role's defaults.
.. [2] Variables defined in inventory file or provided by dynamic inventory.
.. [3] Includes vars added by 'vars plugins' as well as host_vars and group_vars which are added by the default vars plugin shipped with Ansible.
.. [4] When created with set_fact's cacheable option, variables have the higher precedence in the play,
   but are the same as host facts precedence when they come from the cache.
.. note:: Within any section, redefining a var overrides the previous instance.
If multiple groups have the same variable, the last one loaded wins.
If you define a variable twice in a play's ``vars:`` section, the second one wins.
.. note:: The previous describes the default config ``hash_behaviour=replace``; switch to ``merge`` to only partially overwrite.
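As a minimal sketch of the notes above (the variable name is illustrative), a variable defined twice in the same ``vars:`` section keeps the last value; Ansible may also warn about the duplicate dictionary key:

.. code-block:: yaml

   - hosts: all
     vars:
       app_port: 8080
       app_port: 9090   # the second definition wins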
.. _variable_scopes:
Scoping variables
-----------------
You can decide where to set a variable based on the scope you want that value to have. Ansible has three main scopes:
* Global: this is set by config, environment variables and the command line
* Play: each play and contained structures, vars entries (vars; vars_files; vars_prompt), role defaults and vars.
* Host: variables directly associated with a host, like inventory, include_vars, facts or registered task outputs
Inside a template, you automatically have access to all variables that are in scope for a host, plus any registered variables, facts, and magic variables.
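For example, a hypothetical task (the ``ntp_server`` variable is assumed to come from inventory) can mix an inventory variable, a fact, and the ``inventory_hostname`` magic variable in one template expression:

.. code-block:: yaml

   - name: Render a message from variables in scope
     ansible.builtin.debug:
       msg: "{{ inventory_hostname }} uses NTP {{ ntp_server }} on {{ ansible_facts['os_family'] | default('unknown') }}"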
.. _variable_examples:
Tips on where to set variables
------------------------------
You should choose where to define a variable based on the kind of control you might want over values.
Set variables in inventory that deal with geography or behavior. Since groups are frequently the entity that maps roles onto hosts, you can often set variables on the group instead of defining them on a role. Remember: child groups override parent groups, and host variables override group variables. See :ref:`define_variables_in_inventory` for details on setting host and group variables.
Set common defaults in a ``group_vars/all`` file. See :ref:`splitting_out_vars` for details on how to organize host and group variables in your inventory. Group variables are generally placed alongside your inventory file, but they can also be returned by dynamic inventory (see :ref:`intro_dynamic_inventory`) or defined in AWX or on :ref:`ansible_platform` from the UI or API:
.. code-block:: yaml
---
# file: /etc/ansible/group_vars/all
# this is the site wide default
ntp_server: default-time.example.com
Set location-specific variables in ``group_vars/my_location`` files. All groups are children of the ``all`` group, so variables set here override those set in ``group_vars/all``:
.. code-block:: yaml
---
# file: /etc/ansible/group_vars/boston
ntp_server: boston-time.example.com
If one host used a different NTP server, you could set that in a host_vars file, which would override the group variable:
.. code-block:: yaml
---
# file: /etc/ansible/host_vars/xyz.boston.example.com
ntp_server: override.example.com
Set defaults in roles to avoid undefined-variable errors. If you share your roles, other users can rely on the reasonable defaults you added in the ``roles/x/defaults/main.yml`` file, or they can easily override those values in inventory or at the command line. See :ref:`playbooks_reuse_roles` for more info. For example:
.. code-block:: yaml
---
# file: roles/x/defaults/main.yml
# if no other value is supplied in inventory or as a parameter, this value will be used
http_port: 80
Set variables in roles to ensure a value is used in that role, and is not overridden by inventory variables. If you are not sharing your role with others, you can define app-specific behaviors like ports this way, in ``roles/x/vars/main.yml``. If you are sharing roles with others, putting variables here makes them harder to override, although users still can by passing a parameter to the role or setting a variable with ``-e``:
.. code-block:: yaml
---
# file: roles/x/vars/main.yml
# this will absolutely be used in this role
http_port: 80
Pass variables as parameters when you call roles for maximum clarity, flexibility, and visibility. This approach overrides any defaults that exist for a role. For example:
.. code-block:: yaml
roles:
- role: apache
vars:
http_port: 8080
When you read this playbook it is clear that you have chosen to set a variable or override a default. You can also pass multiple values, which allows you to run the same role multiple times. See :ref:`run_role_twice` for more details. For example:
.. code-block:: yaml
roles:
- role: app_user
vars:
myname: Ian
- role: app_user
vars:
myname: Terry
- role: app_user
vars:
myname: Graham
- role: app_user
vars:
myname: John
Variables set in one role are available to later roles. You can set variables in a ``roles/common_settings/vars/main.yml`` file and use them in other roles and elsewhere in your playbook:
.. code-block:: yaml
roles:
- role: common_settings
- role: something
vars:
foo: 12
- role: something_else
.. note:: There are some protections in place to avoid the need to namespace variables.
In this example, variables defined in 'common_settings' are available to 'something' and 'something_else' tasks, but tasks in 'something' have foo set to 12, even if 'common_settings' sets foo to 20.
Instead of worrying about variable precedence, we encourage you to think about how easily or how often you want to override a variable when deciding where to set it. If you are not sure what other variables are defined, and you need a particular value, use ``--extra-vars`` (``-e``) to override all other variables.
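For example, a one-liner (the playbook and variable names are illustrative) that forces a value past every other definition:

.. code-block:: text

    ansible-playbook site.yml --extra-vars "ntp_server=forced.example.com"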
Using advanced variable syntax
==============================
For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see :ref:`playbooks_advanced_syntax`.
.. seealso::
:ref:`about_playbooks`
An introduction to playbooks
:ref:`playbooks_conditionals`
Conditional statements in playbooks
:ref:`playbooks_filters`
Jinja2 filters and their uses
:ref:`playbooks_loops`
Looping in playbooks
:ref:`playbooks_reuse_roles`
Playbook organization by roles
:ref:`tips_and_tricks`
Tips and tricks for playbooks
:ref:`special_variables`
List of special variables
`User Mailing List <https://groups.google.com/group/ansible-devel>`_
Have a question? Stop by the google group!
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,559 |
Add a section about boolean variables
|
### Summary
Ansible accepts a broad range of values for boolean variables: `true/false`, `1/0`, `yes/no`, `True/False`, and so on. We recently reached consensus about representing all Boolean values as `true/false` in the module documentation generated from python docstrings - see [this vote](https://github.com/ansible-community/community-topics/discussions/120) and [this issue](https://github.com/ansible-community/community-topics/issues/116) and [this PR](https://github.com/ansible-community/antsibull-docs/pull/19). We plan to update examples and other mentions of boolean values in the docs over the next few months.
The docs should also show users that they do not have to use `true/false` if they choose not to. Adding a section to the [documentation on variables](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#playbooks-variables) that explains all this will help users.
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/user_guide/playbooks_variables.rst
### Ansible Version
```console
N/A
```
### Configuration
```console
N/A
```
### OS / Environment
N/A
### Additional Information
None
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78559
|
https://github.com/ansible/ansible/pull/78653
|
5f2bb2ba93293f43df4b4134b428de4abbd8d0dd
|
077e1bfe17b99d805dfa94ad9f67e2384077808c
| 2022-08-16T15:27:47Z |
python
| 2022-09-01T18:28:03Z |
docs/docsite/rst/reference_appendices/YAMLSyntax.rst
|
.. _yaml_syntax:
YAML Syntax
===========
This page provides a basic overview of correct YAML syntax, which is how Ansible
playbooks (our configuration management language) are expressed.
We use YAML because it is easier for humans to read and write than other common
data formats like XML or JSON. Further, there are libraries available in most
programming languages for working with YAML.
You may also wish to read :ref:`working_with_playbooks` at the same time to see how this
is used in practice.
YAML Basics
-----------
For Ansible, nearly every YAML file starts with a list.
Each item in the list is a list of key/value pairs, commonly
called a "hash" or a "dictionary". So, we need to know how
to write lists and dictionaries in YAML.
There's another small quirk to YAML. All YAML files (whether or not they are associated with Ansible) can optionally
begin with ``---`` and end with ``...``. This is part of the YAML format and indicates the start and end of a document.
All members of a list are lines beginning at the same indentation level starting with a ``"- "`` (a dash and a space)::
---
# A list of tasty fruits
- Apple
- Orange
- Strawberry
- Mango
...
A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space)::
# An employee record
martin:
name: Martin D'vloper
job: Developer
skill: Elite
More complicated data structures are possible, such as lists of dictionaries, dictionaries whose values are lists or a mix of both::
# Employee records
- martin:
name: Martin D'vloper
job: Developer
skills:
- python
- perl
- pascal
- tabitha:
name: Tabitha Bitumen
job: Developer
skills:
- lisp
- fortran
- erlang
Dictionaries and lists can also be represented in an abbreviated form if you really want to::
---
martin: {name: Martin D'vloper, job: Developer, skill: Elite}
fruits: ['Apple', 'Orange', 'Strawberry', 'Mango']
These are called "Flow collections".
.. _truthiness:
Ansible doesn't really use these too much, but you can also specify a boolean value (true/false) in several forms::
create_key: yes
needs_agent: no
knows_oop: True
likes_emacs: TRUE
uses_cvs: false
Use lowercase 'true' or 'false' for boolean values in dictionaries if you want to be compatible with default yamllint options.
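For example, the same dictionary rewritten in the yamllint-friendly form::

    create_key: true
    needs_agent: false
    knows_oop: true
    likes_emacs: true
    uses_cvs: false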
Values can span multiple lines using ``|`` or ``>``. Spanning multiple lines using a "Literal Block Scalar" ``|`` will include the newlines and any trailing spaces.
Using a "Folded Block Scalar" ``>`` will fold newlines to spaces; it's used to make what would otherwise be a very long line easier to read and edit.
In either case the indentation will be ignored.
Examples are::
include_newlines: |
exactly as you see
will appear these three
lines of poetry
fold_newlines: >
this is really a
single line of text
despite appearances
While in the above ``>`` example all newlines are folded into spaces, there are two ways to enforce a newline to be kept::
    fold_some_newlines: >
        a
        b

        c
        d
          e
        f
Alternatively, it can be enforced by including newline ``\n`` characters::
fold_same_newlines: "a b\nc d\n e\nf\n"
Let's combine what we learned so far in an arbitrary YAML example.
This really has nothing to do with Ansible, but will give you a feel for the format::
---
# An employee record
name: Martin D'vloper
job: Developer
skill: Elite
employed: True
foods:
- Apple
- Orange
- Strawberry
- Mango
languages:
perl: Elite
python: Elite
pascal: Lame
education: |
4 GCSEs
3 A-Levels
BSc in the Internet of Things
That's all you really need to know about YAML to start writing `Ansible` playbooks.
Gotchas
-------
While you can put just about anything into an unquoted scalar, there are some exceptions.
A colon followed by a space (or newline) ``": "`` is an indicator for a mapping.
A space followed by the pound sign ``" #"`` starts a comment.
Because of this, the following is going to result in a YAML syntax error::
foo: somebody said I should put a colon here: so I did
windows_drive: c:
...but this will work::
windows_path: c:\windows
You will want to quote hash values using colons followed by a space or the end of the line::
foo: 'somebody said I should put a colon here: so I did'
windows_drive: 'c:'
...and then the colon will be preserved.
Alternatively, you can use double quotes::
foo: "somebody said I should put a colon here: so I did"
windows_drive: "c:"
The difference between single quotes and double quotes is that in double quotes
you can use escapes::
foo: "a \t TAB and a \n NEWLINE"
The list of allowed escapes can be found in the YAML Specification under "Escape Sequences" (YAML 1.1) or "Escape Characters" (YAML 1.2).
The following is invalid YAML:
.. code-block:: text
foo: "an escaped \' single quote"
Further, Ansible uses "{{ var }}" for variables. If a value after a colon starts
with a "{", YAML will think it is a dictionary, so you must quote it, like so::
foo: "{{ variable }}"
If your value starts with a quote, the entire value must be quoted, not just part of it. Here are some additional examples of how to properly quote things::
foo: "{{ variable }}/additional/string/literal"
foo2: "{{ variable }}\\backslashes\\are\\also\\special\\characters"
foo3: "even if it's just a string literal it must all be quoted"
Not valid::
foo: "E:\\path\\"rest\\of\\path
In addition to ``'`` and ``"`` there are a number of characters that are special (or reserved) and cannot be used
as the first character of an unquoted scalar: ``[] {} > | * & ! % # ` @ ,``.
You should also be aware of ``? : -``. In YAML, they are allowed at the beginning of a string if a non-space
character follows, but YAML processor implementations differ, so it's better to use quotes.
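For example, quoting removes the ambiguity when a string starts with one of these characters (the key names are illustrative)::

    not_a_list: "- leading dash, kept as a string"
    not_a_key_hint: "? leading question mark, kept as a string"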
In Flow Collections, the rules are a bit more strict::
a scalar in block mapping: this } is [ all , valid
flow mapping: { key: "you { should [ use , quotes here" }
Boolean conversion is helpful, but this can be a problem when you want a literal ``yes`` or another boolean value treated as a string.
In these cases, just use quotes::
non_boolean: "yes"
other_string: "False"
YAML converts certain strings into floating-point values, such as the string
`1.0`. If you need to specify a version number (in a requirements.yml file, for
example), you will need to quote the value if it looks like a floating-point
value::
version: "1.0"
.. seealso::
:ref:`working_with_playbooks`
Learn what playbooks can do and how to write/run them.
`YAMLLint <http://yamllint.com/>`_
YAML Lint (online) helps you debug YAML syntax if you are having problems
`GitHub examples directory <https://github.com/ansible/ansible-examples>`_
Complete playbook files from the github project source
`Wikipedia YAML syntax reference <https://en.wikipedia.org/wiki/YAML>`_
A good guide to YAML syntax
`Mailing List <https://groups.google.com/group/ansible-project>`_
Questions? Help? Ideas? Stop by the list on Google Groups
:ref:`communication_irc`
How to join Ansible chat channels (join #yaml for yaml-specific questions)
`YAML 1.1 Specification <https://yaml.org/spec/1.1/>`_
The Specification for YAML 1.1, which PyYAML and libyaml are currently
implementing
`YAML 1.2 Specification <https://yaml.org/spec/1.2/spec.html>`_
For completeness, YAML 1.2 is the successor of 1.1
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I see strange behavior of the `rescued` stat after a run. I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a block-with-always role inside a block with rescue.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if I'm only able to get an error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected run stats to contain rescued=1 since there was a rescue block execution in logs.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
changelogs/fragments/43191-72638-ansible_failed_task-fixes.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I see strange behavior of the `rescued` stat after a run. I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a block-with-always role inside a block with rescue.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if I'm only able to get an error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected run stats to contain rescued=1 since there was a rescue block execution in logs.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
docs/docsite/rst/playbook_guide/playbooks_blocks.rst
|
.. _playbooks_blocks:
******
Blocks
******
Blocks create logical groups of tasks. Blocks also offer ways to handle task errors, similar to exception handling in many programming languages.
.. contents::
:local:
Grouping tasks with blocks
==========================
All tasks in a block inherit directives applied at the block level. Most of what you can apply to a single task (with the exception of loops) can be applied at the block level, so blocks make it much easier to set data or directives common to the tasks. The directive does not affect the block itself, it is only inherited by the tasks enclosed by a block. For example, a `when` statement is applied to the tasks within a block, not to the block itself.
.. code-block:: YAML
:emphasize-lines: 3
:caption: Block example with named tasks inside the block
tasks:
- name: Install, configure, and start Apache
block:
- name: Install httpd and memcached
ansible.builtin.yum:
name:
- httpd
- memcached
state: present
- name: Apply the foo config template
ansible.builtin.template:
src: templates/src.j2
dest: /etc/foo.conf
- name: Start service bar and enable it
ansible.builtin.service:
name: bar
state: started
enabled: True
when: ansible_facts['distribution'] == 'CentOS'
become: true
become_user: root
ignore_errors: yes
In the example above, the 'when' condition will be evaluated before Ansible runs each of the three tasks in the block. All three tasks also inherit the privilege escalation directives, running as the root user. Finally, ``ignore_errors: yes`` ensures that Ansible continues to execute the playbook even if some of the tasks fail.
Names for blocks have been available since Ansible 2.3. We recommend using names in all tasks, within blocks or elsewhere, for better visibility into the tasks being executed when you run the playbook.
.. _block_error_handling:
Handling errors with blocks
===========================
You can control how Ansible responds to task errors using blocks with ``rescue`` and ``always`` sections.
Rescue blocks specify tasks to run when an earlier task in a block fails. This approach is similar to exception handling in many programming languages. Ansible only runs rescue blocks after a task returns a 'failed' state. Bad task definitions and unreachable hosts will not trigger the rescue block.
.. _block_rescue:
.. code-block:: YAML
:emphasize-lines: 3,14
:caption: Block error handling example
tasks:
- name: Handle the error
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
- name: Force a failure
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I never execute, due to the above task failing, :-('
rescue:
- name: Print when errors
ansible.builtin.debug:
msg: 'I caught an error, can do stuff here to fix it, :-)'
You can also add an ``always`` section to a block. Tasks in the ``always`` section run no matter what the task status of the previous block is.
.. _block_always:
.. code-block:: YAML
:emphasize-lines: 2,13
:caption: Block with always section
- name: Always do X
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
- name: Force a failure
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I never execute :-('
always:
- name: Always do this
ansible.builtin.debug:
msg: "This always executes, :-)"
Together, these elements offer complex error handling.
.. code-block:: YAML
:emphasize-lines: 2,13,24
:caption: Block with all sections
- name: Attempt and graceful roll back demo
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
- name: Force a failure
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I never execute, due to the above task failing, :-('
rescue:
- name: Print when errors
ansible.builtin.debug:
msg: 'I caught an error'
- name: Force a failure in middle of recovery! >:-)
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I also never execute :-('
always:
- name: Always do this
ansible.builtin.debug:
msg: "This always executes"
The tasks in the ``block`` execute normally. If any tasks in the block return ``failed``, the ``rescue`` section executes tasks to recover from the error. The ``always`` section runs regardless of the results of the ``block`` and ``rescue`` sections.
If an error occurs in the block and the rescue task succeeds, Ansible reverts the failed status of the original task for the run and continues to run the play as if the original task had succeeded. The rescued task is considered successful, and does not trigger ``max_fail_percentage`` or ``any_errors_fatal`` configurations. However, Ansible still reports a failure in the playbook statistics.
You can use blocks with ``flush_handlers`` in a rescue task to ensure that all handlers run even if an error occurs:
.. code-block:: YAML
:emphasize-lines: 3,12
:caption: Block run handlers in error handling
tasks:
- name: Attempt and graceful roll back demo
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
changed_when: yes
notify: run me even after an error
- name: Force a failure
ansible.builtin.command: /bin/false
rescue:
- name: Make sure all handlers run
meta: flush_handlers
handlers:
- name: Run me even after an error
ansible.builtin.debug:
msg: 'This handler runs even on error'
.. versionadded:: 2.1
Ansible provides a couple of variables for tasks in the ``rescue`` portion of a block:
ansible_failed_task
The task that returned 'failed' and triggered the rescue. For example, to get the name use ``ansible_failed_task.name``.
ansible_failed_result
The captured return result of the failed task that triggered the rescue. This would equate to having used this var in the ``register`` keyword.
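A minimal sketch of using both variables in a ``rescue`` section (the ``default`` fallback is illustrative, since not every failure result includes ``msg``):

.. code-block:: YAML

   rescue:
     - name: Report what failed
       ansible.builtin.debug:
         msg: "Task '{{ ansible_failed_task.name }}' failed: {{ ansible_failed_result.msg | default('no message') }}"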
.. seealso::
:ref:`playbooks_intro`
An introduction to playbooks
:ref:`playbooks_reuse_roles`
Playbook organization by roles
`User Mailing List <https://groups.google.com/group/ansible-devel>`_
Have a question? Stop by the google group!
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I see strange behavior of the `rescued` stat after a run. I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a block-with-always role inside a block with rescue.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if I'm only able to get an error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected run stats to contain rescued=1 since there was a rescue block execution in logs.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
lib/ansible/executor/play_iterator.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from enum import IntEnum, IntFlag
from ansible import constants as C
from ansible.errors import AnsibleAssertionError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates']
class IteratingStates(IntEnum):
SETUP = 0
TASKS = 1
RESCUE = 2
ALWAYS = 3
HANDLERS = 4
COMPLETE = 5
class FailedStates(IntFlag):
NONE = 0
SETUP = 1
TASKS = 2
RESCUE = 4
ALWAYS = 8
HANDLERS = 16
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.handlers = []
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_handlers_task = 0
self.run_state = IteratingStates.SETUP
self.fail_state = FailedStates.NONE
self.pre_flushing_run_state = None
self.update_handlers = True
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, "
"pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, "
"tasks child state? (%s), rescue child state? (%s), always child state? (%s), "
"did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_handlers_task,
self.run_state,
self.fail_state,
self.pre_flushing_run_state,
self.update_handlers,
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks',
'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task',
'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.handlers = self.handlers[:]
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.cur_handlers_task = self.cur_handlers_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pre_flushing_run_state = self.pre_flushing_run_state
new_state.update_handlers = self.update_handlers
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
# TODO: hardcoded resolution here, but should use actual resolution code in the end,
# in case of 'legacy' mismatch
setup_task.resolved_action = 'ansible.builtin.gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
# Default options to gather
for option in ('gather_subset', 'gather_timeout', 'fact_path'):
value = getattr(self._play, option, None)
if value is not None:
setup_task.args[option] = value
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
# keep a flattened (no blocks) list of all tasks from the play
# used for the lockstep mechanism in the linear strategy
self.all_tasks = setup_block.get_tasks()
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self.all_tasks.extend(new_block.get_tasks())
# keep list of all handlers, it is copied into each HostState
# at the beginning of IteratingStates.HANDLERS
# the copy happens at each flush in order to restore the original
# list and remove any included handlers that might not be notified
# at the particular flush
self.handlers = [h for b in self._play.handlers for h in b.block]
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self.set_state_for_host(host.name, HostState(blocks=self._blocks))
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == IteratingStates.COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
self.set_state_for_host(host.name, s)
# finally, reset the host's state to IteratingStates.SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = IteratingStates.SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
self.end_play = False
self.cur_task = 0
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self.set_state_for_host(host.name, HostState(blocks=[]))
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
display.deprecated(
'PlayIterator.cache_block_tasks is now noop due to the changes '
'in the way tasks are cached and is deprecated.',
version=2.16
)
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == IteratingStates.COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host)
if not peek:
self.set_state_for_host(host.name, s)
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = IteratingStates.COMPLETE
return (state, None)
if state.run_state == IteratingStates.SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through IteratingStates.SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through IteratingStates.SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to IteratingStates.TASKS
state.pending_setup = False
state.run_state = IteratingStates.TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == IteratingStates.TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = IteratingStates.RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = IteratingStates.ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = IteratingStates.TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == IteratingStates.RESCUE:
# The process here is identical to IteratingStates.TASKS, except instead
# we move into the always portion of the block.
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
state.run_state = IteratingStates.ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = FailedStates.NONE
state.run_state = IteratingStates.ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == IteratingStates.ALWAYS:
# And again, the process here is identical to IteratingStates.TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to IteratingStates.COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != FailedStates.NONE:
state.run_state = IteratingStates.COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = IteratingStates.TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_always_task += 1
elif state.run_state == IteratingStates.HANDLERS:
if state.update_handlers:
# reset handlers for HostState since handlers from include_tasks
# might be there from previous flush
state.handlers = self.handlers[:]
state.update_handlers = False
state.cur_handlers_task = 0
if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
state.update_handlers = True
state.run_state = IteratingStates.COMPLETE
else:
while True:
try:
task = state.handlers[state.cur_handlers_task]
except IndexError:
task = None
state.run_state = state.pre_flushing_run_state
state.update_handlers = True
break
else:
state.cur_handlers_task += 1
if task.is_host_notified(host):
break
elif state.run_state == IteratingStates.COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == IteratingStates.SETUP:
state.fail_state |= FailedStates.SETUP
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= FailedStates.TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= FailedStates.RESCUE
if state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= FailedStates.ALWAYS
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.HANDLERS:
state.fail_state |= FailedStates.HANDLERS
state.update_handlers = True
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self.set_state_for_host(host.name, s)
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
return True
elif state.fail_state != FailedStates.NONE:
if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
return False
elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0)
elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def clear_host_errors(self, host):
self._clear_state_errors(self.get_state_for_host(host.name))
def _clear_state_errors(self, state: HostState) -> None:
state.fail_state = FailedStates.NONE
if state.tasks_child_state is not None:
self._clear_state_errors(state.tasks_child_state)
elif state.rescue_child_state is not None:
self._clear_state_errors(state.rescue_child_state)
elif state.always_child_state is not None:
self._clear_state_errors(state.always_child_state)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
'''
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
if state.run_state == IteratingStates.RESCUE:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
return False
def get_original_task(self, host, task):
display.deprecated(
'PlayIterator.get_original_task is now noop due to the changes '
'in the way tasks are cached and is deprecated.',
version=2.16
)
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if (state.fail_state != FailedStates.NONE and state.run_state == IteratingStates.TASKS) or not task_list:
return state
if state.run_state == IteratingStates.TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.HANDLERS:
state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block]
return state
def add_tasks(self, host, task_list):
self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list))
@property
def host_states(self):
return self._host_states
def get_state_for_host(self, hostname: str) -> HostState:
return self._host_states[hostname]
def set_state_for_host(self, hostname: str, state: HostState) -> None:
if not isinstance(state, HostState):
raise AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state))
self._host_states[hostname] = state
def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None:
if not isinstance(run_state, IteratingStates):
raise AnsibleAssertionError('Expected run_state to be a IteratingStates but was %s' % (type(run_state)))
self._host_states[hostname].run_state = run_state
def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None:
if not isinstance(fail_state, FailedStates):
raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state)))
self._host_states[hostname].fail_state = fail_state
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I see strange behavior of the `rescued` stat after a run. I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a block-with-always role inside a block with rescue.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if I'm only able to get an error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected run stats to contain rescued=1 since there was a rescue block execution in logs.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
lib/ansible/plugins/strategy/__init__.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import queue
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.play_iterator import IteratingStates
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.executor.task_queue_manager import CallbackSend, DisplaySend
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.fqcn import add_internal_fqcns
from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# This list can be an exact match, or start of string bound
# does not accept regex
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
class StrategySentinel:
pass
_sentinel = StrategySentinel()
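# Placed on the final results queue by StrategyBase.cleanup() to tell the
# results thread (results_thread_main) to shut down.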
def post_process_whens(result, task, templar, task_vars):
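    # Re-evaluate ``changed_when``/``failed_when`` against a finished result.
    # Used for per-item results that are post-processed on the controller
    # (e.g. add_host/add_group items in _process_pending_results).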
cond = None
if task.changed_when:
with templar.set_temporary_context(available_variables=task_vars):
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
with templar.set_temporary_context(available_variables=task_vars):
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
def _get_item_vars(result, task):
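    # Collect the per-item loop variables (loop_var, index_var, label,
    # ansible_loop) from a loop result item so they can be fed back into the
    # task vars when the item result is post-processed.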
item_vars = {}
if task.loop or task.loop_with:
loop_var = result.get('ansible_loop_var', 'item')
index_var = result.get('ansible_index_var')
if loop_var in result:
item_vars[loop_var] = result[loop_var]
if index_var and index_var in result:
item_vars[index_var] = result[index_var]
if '_ansible_item_label' in result:
item_vars['_ansible_item_label'] = result['_ansible_item_label']
if 'ansible_loop' in result:
item_vars['ansible_loop'] = result['ansible_loop']
return item_vars
def results_thread_main(strategy):
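    # Background thread target: drain the final queue fed by the worker
    # processes, forwarding display and callback requests and collecting
    # TaskResults until the shutdown sentinel arrives (see cleanup()).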
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, DisplaySend):
display.display(*result.args, **result.kwargs)
elif isinstance(result, CallbackSend):
for arg in result.args:
if isinstance(arg, TaskResult):
strategy.normalize_task_result(arg)
break
strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
strategy.normalize_task_result(result)
with strategy._results_lock:
strategy._results.append(result)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
except queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator.host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
for host_name, state in prev_host_states.items():
if host_name == host.name:
continue
iterator.set_state_for_host(host_name, state)
iterator._play._removed_hosts.remove(host_name)
iterator.set_state_for_host(host.name, prev_host_state)
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if not play.finalized and Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in self._active_connections.values():
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be IteratingStates.COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(self._tqm._unreachable_hosts.keys()) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(iterator.get_failed_hosts()) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by two
# functions: linear.py::run(), and
# free.py::run() so we'd have to add to both to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
def normalize_task_result(self, task_result):
"""Normalize a TaskResult to reference actual Host and Task objects
when only given the ``Host.name``, or the ``Task._uuid``
Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
Mutates the original object
"""
if isinstance(task_result._host, string_types):
# If the value is a string, it is ``Host.name``
task_result._host = self._inventory.get_host(to_text(task_result._host))
if isinstance(task_result._task, string_types):
# If the value is a string, it is ``Task._uuid``
queue_cache_entry = (task_result._host.name, task_result._task)
try:
found_task = self._queued_task_cache[queue_cache_entry]['task']
except KeyError:
# This should only happen due to an implicit task created by the
# TaskExecutor, restrict this behavior to the explicit use case
# of an implicit async_status task
if task_result._task_fields.get('action') != 'async_status':
raise
original_task = Task()
else:
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._task = original_task
return task_result
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
handler_templar = Templar(self._loader)
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
for handler_task in handler_block.block:
if handler_task.name:
try:
if not handler_task.cached_name:
if handler_templar.is_template(handler_task.name):
handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
task=handler_task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
handler_task.name = handler_templar.template(handler_task.name)
handler_task.cached_name = True
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
candidates = (
handler_task.name,
handler_task.get_name(include_role_fqcn=False),
handler_task.get_name(include_role_fqcn=True),
)
if handler_name in candidates:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable) as e:
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
if not handler_task.listen:
display.warning(
"Handler '%s' is unusable because it has no listen topics and "
"the name could not be templated (host-specific variables are "
"not supported in handler names). The error: %s" % (handler_task.name, to_text(e))
)
continue
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
original_host = task_result._host
original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
iterator.mark_host_failed(h)
else:
iterator.mark_host_failed(original_host)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# Use of get_active_state() here helps detect proper state if, say, we are in a rescue
# block from an included file (include_tasks). In a non-included rescue case, a rescue
# that starts with a new 'block' will have an active state of IteratingStates.TASKS, so we also
# check the current state block tree to see if any blocks are rescuing.
if state and (iterator.get_active_state(state).run_state == IteratingStates.RESCUE or
iterator.is_any_block_rescuing(state)):
self._tqm._stats.increment('rescued', original_host.name)
iterator._play._removed_hosts.remove(original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
for listening_handler_block in iterator._play.handlers:
for listening_handler in listening_handler_block.block:
listeners = getattr(listening_handler, 'listen', []) or []
if not listeners:
continue
listeners = listening_handler.get_validated_value(
'listen', listening_handler.fattributes.get('listen'), listeners, handler_templar
)
if handler_name not in listeners:
continue
else:
found = True
if listening_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._inventory.add_dynamic_host(new_host_info, result_item)
# ensure host is available for subsequent plays
if result_item.get('changed') and new_host_info['host_name'] not in self._hosts_cache_all:
self._hosts_cache_all.append(new_host_info['host_name'])
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._inventory.add_dynamic_group(original_host, result_item)
if 'add_host' in result_item or 'add_group' in result_item:
item_vars = _get_item_vars(result_item, original_task)
found_task_vars = self._queued_task_cache.get((original_host.name, task_result._task._uuid))['task_vars']
if item_vars:
all_task_vars = combine_vars(found_task_vars, item_vars)
else:
all_task_vars = found_task_vars
all_task_vars[original_task.register] = wrap_var(result_item)
post_process_whens(result_item, original_task, handler_templar, all_task_vars)
if original_task.loop or original_task.loop_with:
new_item_result = TaskResult(
task_result._host,
task_result._task,
result_item,
task_result._task_fields,
)
self._tqm.send_callback('v2_runner_item_on_ok', new_item_result)
if result_item.get('changed', False):
task_result._result['changed'] = True
if result_item.get('failed', False):
task_result._result['failed'] = True
if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in result_item['ansible_facts'].items():
# find the host we're actually referring too here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[original_task._role.get_name()].items():
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if isinstance(original_task, Handler):
for handler in (h for b in iterator._play.handlers for h in b.block if h._uuid == original_task._uuid):
handler.remove_host(original_host)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars | included_file._vars
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
Raises AnsibleError exception in case of a failure during including a file,
in such case the caller is responsible for marking the host(s) as failed
using PlayIterator.mark_host_failed().
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleParserError:
raise
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
for r in included_file._results:
r._result['failed'] = True
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
raise AnsibleError(reason) from e
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
skip_reason = '%s conditional evaluated to False' % meta_action
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
# These don't support "when" conditionals
if meta_action in ('noop', 'refresh_inventory', 'reset_connection') and task.when:
self._cond_not_supported_warn(meta_action)
if meta_action == 'noop':
msg = "noop"
elif meta_action == 'flush_handlers':
if _evaluate_conditional(target_host):
host_state = iterator.get_state_for_host(target_host.name)
if host_state.run_state == IteratingStates.HANDLERS:
raise AnsibleError('flush_handlers cannot be used as a handler')
if target_host.name not in self._tqm._unreachable_hosts:
host_state.pre_flushing_run_state = host_state.run_state
host_state.run_state = IteratingStates.HANDLERS
msg = "triggered running handlers for %s" % target_host.name
else:
skipped = True
skip_reason += ', not running handlers for %s' % target_host.name
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator.clear_host_errors(host)
msg = "cleared host errors"
else:
skipped = True
skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_batch':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
msg = "ending batch"
else:
skipped = True
skip_reason += ', continuing current batch'
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
# end_play is used in PlaybookExecutor/TQM to indicate that
# the whole play is supposed to be ended as opposed to just a batch
iterator.end_play = True
msg = "ending play"
else:
skipped = True
skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator.set_run_state_for_host(target_host.name, IteratingStates.COMPLETE)
iterator._play._removed_hosts.append(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
skip_reason += ", continuing execution for %s" % target_host.name
# TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'role_complete':
# Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
# How would this work with allow_duplicates??
if task.implicit:
if target_host.name in task._role._had_task_run:
task._role._completed[target_host.name] = True
msg = 'role_complete for %s' % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist. This 'mostly' works here cause meta
# disregards the loop, but should not really use play_context at all
play_context.update_vars(all_vars)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
result['skip_reason'] = skip_reason
else:
result['changed'] = False
display.vv("META: %s" % msg)
if isinstance(task, Handler):
task.remove_host(target_host)
res = TaskResult(target_host, task, result)
if skipped:
self._tqm.send_callback('v2_runner_on_skipped', res)
return [res]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
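    # Interactive task debugger built on the cmd.Cmd REPL. The single-letter
    # aliases defined below map to full commands: p(print), r(redo),
    # c(continue), q(quit), u(update_task).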
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I have strange behavior of the `rescued` stat after a run: I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a role containing a block with an `always` section, nested inside a block with a `rescue` section.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if all I get is the error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected the run stats to contain rescued=1, since the logs show that a rescue block was executed.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
test/integration/targets/blocks/43191-2.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I have strange behavior of the `rescued` stat after a run: I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a role containing a block with an `always` section, nested inside a block with a `rescue` section.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if all I get is the error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected the run stats to contain rescued=1, since the logs show that a rescue block was executed.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
test/integration/targets/blocks/43191.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 78,042 |
Block with `rescue` containing block with `always` inside. Strange stats behavior.
|
### Summary
I have strange behavior of the `rescued` stat after a run: I expected it to equal 1, since a rescue block was executed, but instead it has a value of 0.
This is caused by a role containing a block with an `always` section, nested inside a block with a `rescue` section.
Is this expected behavior? How can I tell whether the rescue block was actually executed in this scenario, if all I get is the error?
### Issue Type
Bug Report
### Component Name
block
### Ansible Version
```console
ansible [core 2.13.0]
```
### Configuration
```console
All defaults
```
### OS / Environment
Arch Linux.
### Steps to Reproduce
Here's my main playbook:
```
---
- name: Ansible Blocks
hosts: localhost
gather_facts: false
tasks:
- name: kek
block:
- name: Run Main Role
import_role:
name: mainerrrole
rescue:
- name: Rescue block (perform recovery)
debug:
msg: "Something went wrong, cleaning up.."
```
Here's the `mainerrrole` code
```
- name: Deploy
block:
- name: ERR ROLE Failing intentionally by role
command: "ls -l /tmp/does-not-exist"
always:
- name: Cleanup
import_role:
name: okrole
```
Here's the `okrole` code
```
- name: OK ROLE List home directory content
command: "ls -l ~/"
```
### Expected Results
Expected the run stats to contain rescued=1, since the logs show that a rescue block was executed.
### Actual Results
```console
ansible-playbook play.yml
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'
_______________________
< PLAY [Ansible Blocks] >
-----------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
_____________________________________________________________
< TASK [mainerrrole : ERR ROLE Failing intentionally by role] >
-------------------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["ls", "-l", "/tmp/does-not-exist"], "delta": "0:00:00.005622", "end": "2022-06-13 14:44:50.711257", "msg": "non-zero return code", "rc": 2, "start": "2022-06-13 14:44:50.705635", "stderr": "ls: cannot access '/tmp/does-not-exist': No such file or directory", "stderr_lines": ["ls: cannot access '/tmp/does-not-exist': No such file or directory"], "stdout": "", "stdout_lines": []}
_____________________________________________________
< TASK [okrole : OK ROLE List home directory content] >
-----------------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
changed: [localhost]
________________________________________
< TASK [Rescue block (perform recovery)] >
----------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
ok: [localhost] => {
"msg": "Something went wrong, cleaning up.."
}
____________
< PLAY RECAP >
------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
localhost : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/78042
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2022-06-13T04:58:40Z |
python
| 2022-09-06T15:11:49Z |
test/integration/targets/blocks/runme.sh
|
#!/usr/bin/env bash
set -eux
# This test does not use "$@" to avoid further increasing the verbosity beyond what is required for the test.
# Increasing verbosity from -vv to -vvv can increase the line count from ~400 to ~9K on our centos6 test container.
# remove old output log
rm -f block_test.out
# run the test and check to make sure the right number of completions was logged
ansible-playbook -vv main.yml -i ../../inventory | tee block_test.out
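# strip ANSI color escape sequences so the play-count grep below can match plain text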
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
rm -f block_test.out block_test_wo_colors.out
# run test with free strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=free | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
rm -f block_test.out block_test_wo_colors.out
# run test with host_pinned strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=host_pinned | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# run test that includes tasks that fail inside a block with always
rm -f block_test.out block_test_wo_colors.out
ansible-playbook -vv block_fail.yml -i ../../inventory | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
ansible-playbook -vv block_rescue_vars.yml
# https://github.com/ansible/ansible/issues/70000
set +e
exit_code=0
ansible-playbook -vv always_failure_with_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Rescue' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
set +e
exit_code=0
ansible-playbook -vv always_no_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
set +e
exit_code=0
ansible-playbook -vv always_failure_no_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/29047
ansible-playbook -vv issue29047.yml -i ../../inventory
# https://github.com/ansible/ansible/issues/61253
ansible-playbook -vv block_in_rescue.yml -i ../../inventory > rc_test.out
cat rc_test.out
[ "$(grep -c 'rescued=3' rc_test.out)" -eq 1 ]
[ "$(grep -c 'failed=0' rc_test.out)" -eq 1 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/71306
set +e
exit_code=0
ansible-playbook -i host1,host2 -vv issue71306.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 0 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/69848
ansible-playbook -i host1,host2 --tags foo -vv 69848.yml > role_complete_test.out
cat role_complete_test.out
[ "$(grep -c 'Tagged task' role_complete_test.out)" -eq 2 ]
[ "$(grep -c 'Not tagged task' role_complete_test.out)" -eq 0 ]
rm -f role_complete_test.out
# test notify inheritance
ansible-playbook inherit_notify.yml "$@"
ansible-playbook unsafe_failed_task.yml "$@"
ansible-playbook finalized_task.yml "$@"
# https://github.com/ansible/ansible/issues/72725
ansible-playbook -i host1,host2 -vv 72725.yml
# https://github.com/ansible/ansible/issues/72781
set +e
ansible-playbook -i host1,host2 -vv 72781.yml > 72781.out
set -e
cat 72781.out
[ "$(grep -c 'SHOULD NOT HAPPEN' 72781.out)" -eq 0 ]
rm -f 72781.out
set +e
ansible-playbook -i host1,host2 -vv 78612.yml | tee 78612.out
set -e
[ "$(grep -c 'PASSED' 78612.out)" -eq 1 ]
rm -f 78612.out
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you call a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task`, but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine when the inner block has no `rescue` or `always` clause: the error propagates to the outer block, where a `rescue` clause can catch it with the expected information set in `ansible_failed_result`.
The problem arises when the inner block uses `rescue` or `always` (in any combination). The outer `rescue` clause is still called, meaning Ansible detected and handled an error, but the context information is gone and `ansible_failed_result` is undefined. You can re-raise the error by passing `ansible_failed_result` as the sole value of the `fail` module's `msg` argument:
```yaml
- block:
    ...
  rescue:
    - name: re-raise the error from the inner block
      fail:
        msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know whether it is "accidental" behavior or the fully supported way to do this. I would like to know whether Ansible core officially supports it and, if this is the correct way to "catch" and "re-raise" errors, to have it documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
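To make the pattern concrete, here is a minimal self-contained playbook sketching the nested-block re-raise described above (host, task names, and messages are illustrative, not from the original report):
```yaml
- hosts: localhost
  gather_facts: false
  tasks:
    - block:
        - block:
            - name: fail inside the inner block
              fail:
                msg: original error
          rescue:
            - name: re-raise so the outer rescue keeps the context
              fail:
                msg: "{{ ansible_failed_result }}"
          always:
            - name: inner cleanup still runs
              debug:
                msg: cleaning up
      rescue:
        - name: outer rescue sees the re-raised result
          debug:
            var: ansible_failed_result
```
Without the re-raise in the inner `rescue`, the outer `rescue` would still fire, but `ansible_failed_result` would be undefined there.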
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
changelogs/fragments/43191-72638-ansible_failed_task-fixes.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you call a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task`, but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine when the inner block has no `rescue` or `always` clause: the error propagates to the outer block, where a `rescue` clause can catch it with the expected information set in `ansible_failed_result`.
The problem arises when the inner block uses `rescue` or `always` (in any combination). The outer `rescue` clause is still called, meaning Ansible detected and handled an error, but the context information is gone and `ansible_failed_result` is undefined. You can re-raise the error by passing `ansible_failed_result` as the sole value of the `fail` module's `msg` argument:
```yaml
- block:
    ...
  rescue:
    - name: re-raise the error from the inner block
      fail:
        msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know whether it is "accidental" behavior or the fully supported way to do this. I would like to know whether Ansible core officially supports it and, if this is the correct way to "catch" and "re-raise" errors, to have it documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
docs/docsite/rst/playbook_guide/playbooks_blocks.rst
|
.. _playbooks_blocks:
******
Blocks
******
Blocks create logical groups of tasks. Blocks also offer ways to handle task errors, similar to exception handling in many programming languages.
.. contents::
:local:
Grouping tasks with blocks
==========================
All tasks in a block inherit directives applied at the block level. Most of what you can apply to a single task (with the exception of loops) can be applied at the block level, so blocks make it much easier to set data or directives common to the tasks. The directive does not affect the block itself; it is only inherited by the tasks enclosed by the block. For example, a ``when`` statement is applied to the tasks within a block, not to the block itself.
.. code-block:: YAML
:emphasize-lines: 3
:caption: Block example with named tasks inside the block
tasks:
- name: Install, configure, and start Apache
block:
- name: Install httpd and memcached
ansible.builtin.yum:
name:
- httpd
- memcached
state: present
- name: Apply the foo config template
ansible.builtin.template:
src: templates/src.j2
dest: /etc/foo.conf
- name: Start service bar and enable it
ansible.builtin.service:
name: bar
state: started
enabled: True
when: ansible_facts['distribution'] == 'CentOS'
become: true
become_user: root
ignore_errors: yes
In the example above, the 'when' condition will be evaluated before Ansible runs each of the three tasks in the block. All three tasks also inherit the privilege escalation directives, running as the root user. Finally, ``ignore_errors: yes`` ensures that Ansible continues to execute the playbook even if some of the tasks fail.
Names for blocks have been available since Ansible 2.3. We recommend using names in all tasks, within blocks or elsewhere, for better visibility into the tasks being executed when you run the playbook.
.. _block_error_handling:
Handling errors with blocks
===========================
You can control how Ansible responds to task errors using blocks with ``rescue`` and ``always`` sections.
Rescue blocks specify tasks to run when an earlier task in a block fails. This approach is similar to exception handling in many programming languages. Ansible only runs rescue blocks after a task returns a 'failed' state. Bad task definitions and unreachable hosts will not trigger the rescue block.
.. _block_rescue:
.. code-block:: YAML
:emphasize-lines: 3,14
:caption: Block error handling example
tasks:
- name: Handle the error
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
- name: Force a failure
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I never execute, due to the above task failing, :-('
rescue:
- name: Print when errors
ansible.builtin.debug:
msg: 'I caught an error, can do stuff here to fix it, :-)'
You can also add an ``always`` section to a block. Tasks in the ``always`` section run regardless of the task status of the rest of the block.
.. _block_always:
.. code-block:: YAML
:emphasize-lines: 2,13
:caption: Block with always section
- name: Always do X
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
- name: Force a failure
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I never execute :-('
always:
- name: Always do this
ansible.builtin.debug:
msg: "This always executes, :-)"
Together, these elements offer complex error handling.
.. code-block:: YAML
:emphasize-lines: 2,13,24
:caption: Block with all sections
- name: Attempt and graceful roll back demo
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
- name: Force a failure
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I never execute, due to the above task failing, :-('
rescue:
- name: Print when errors
ansible.builtin.debug:
msg: 'I caught an error'
- name: Force a failure in middle of recovery! >:-)
ansible.builtin.command: /bin/false
- name: Never print this
ansible.builtin.debug:
msg: 'I also never execute :-('
always:
- name: Always do this
ansible.builtin.debug:
msg: "This always executes"
The tasks in the ``block`` execute normally. If any tasks in the block return ``failed``, the ``rescue`` section executes tasks to recover from the error. The ``always`` section runs regardless of the results of the ``block`` and ``rescue`` sections.
If an error occurs in the block and the rescue task succeeds, Ansible reverts the failed status of the original task for the run and continues to run the play as if the original task had succeeded. The rescued task is considered successful, and does not trigger ``max_fail_percentage`` or ``any_errors_fatal`` configurations. However, Ansible still reports a failure in the playbook statistics.
You can use blocks with ``flush_handlers`` in a rescue task to ensure that all handlers run even if an error occurs:
.. code-block:: YAML
:emphasize-lines: 3,12
:caption: Block run handlers in error handling
tasks:
- name: Attempt and graceful roll back demo
block:
- name: Print a message
ansible.builtin.debug:
msg: 'I execute normally'
changed_when: yes
notify: run me even after an error
- name: Force a failure
ansible.builtin.command: /bin/false
rescue:
- name: Make sure all handlers run
meta: flush_handlers
handlers:
- name: Run me even after an error
ansible.builtin.debug:
msg: 'This handler runs even on error'
.. versionadded:: 2.1
Ansible provides a couple of variables for tasks in the ``rescue`` portion of a block:
ansible_failed_task
The task that returned 'failed' and triggered the rescue. For example, to get the name use ``ansible_failed_task.name``.
ansible_failed_result
The captured return result of the failed task that triggered the rescue. This is equivalent to what you would get by capturing the failed task's result with the ``register`` keyword.
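For example, a task in a ``rescue`` section can re-raise the original error so that an enclosing block can handle it with this context preserved; a minimal sketch:
.. code-block:: YAML

   rescue:
     - name: Re-raise the error from this block
       ansible.builtin.fail:
         msg: '{{ ansible_failed_result }}'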
.. seealso::
:ref:`playbooks_intro`
An introduction to playbooks
:ref:`playbooks_reuse_roles`
Playbook organization by roles
`User Mailing List <https://groups.google.com/group/ansible-devel>`_
Have a question? Stop by the google group!
:ref:`communication_irc`
How to join Ansible chat channels
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you call a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task`, but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine when the inner block has no `rescue` or `always` clause: the error propagates to the outer block, where a `rescue` clause can catch it with the expected information set in `ansible_failed_result`.
The problem arises when the inner block uses `rescue` or `always` (in any combination). The outer `rescue` clause is still called, meaning Ansible detected and handled an error, but the context information is gone and `ansible_failed_result` is undefined. You can re-raise the error by passing `ansible_failed_result` as the sole value of the `fail` module's `msg` argument:
```yaml
- block:
    ...
  rescue:
    - name: re-raise the error from the inner block
      fail:
        msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know whether it is "accidental" behavior or the fully supported way to do this. I would like to know whether Ansible core officially supports it and, if this is the correct way to "catch" and "re-raise" errors, to have it documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
lib/ansible/executor/play_iterator.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from enum import IntEnum, IntFlag
from ansible import constants as C
from ansible.errors import AnsibleAssertionError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates']
class IteratingStates(IntEnum):
SETUP = 0
TASKS = 1
RESCUE = 2
ALWAYS = 3
HANDLERS = 4
COMPLETE = 5
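# FailedStates is a bit field: a host can accumulate failures from several
# sections at once, hence IntFlag (combined with |= and tested with &) rather
# than a plain IntEnum.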
class FailedStates(IntFlag):
NONE = 0
SETUP = 1
TASKS = 2
RESCUE = 4
ALWAYS = 8
HANDLERS = 16
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.handlers = []
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_handlers_task = 0
self.run_state = IteratingStates.SETUP
self.fail_state = FailedStates.NONE
self.pre_flushing_run_state = None
self.update_handlers = True
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, "
"pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, "
"tasks child state? (%s), rescue child state? (%s), always child state? (%s), "
"did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_handlers_task,
self.run_state,
self.fail_state,
self.pre_flushing_run_state,
self.update_handlers,
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
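# note: handlers, did_rescue and did_start_at_task are not part of the comparison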
for attr in ('_blocks',
'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task',
'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.handlers = self.handlers[:]
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.cur_handlers_task = self.cur_handlers_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pre_flushing_run_state = self.pre_flushing_run_state
new_state.update_handlers = self.update_handlers
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
# TODO: hardcoded resolution here, but should use actual resolution code in the end,
# in case of 'legacy' mismatch
setup_task.resolved_action = 'ansible.builtin.gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
# Default options to gather
for option in ('gather_subset', 'gather_timeout', 'fact_path'):
value = getattr(self._play, option, None)
if value is not None:
setup_task.args[option] = value
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
# keep flatten (no blocks) list of all tasks from the play
# used for the lockstep mechanism in the linear strategy
self.all_tasks = setup_block.get_tasks()
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self.all_tasks.extend(new_block.get_tasks())
# keep list of all handlers, it is copied into each HostState
# at the beginning of IteratingStates.HANDLERS
# the copy happens at each flush in order to restore the original
# list and remove any included handlers that might not be notified
# at the particular flush
self.handlers = [h for b in self._play.handlers for h in b.block]
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self.set_state_for_host(host.name, HostState(blocks=self._blocks))
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == IteratingStates.COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
self.set_state_for_host(host.name, s)
# finally, reset the host's state to IteratingStates.SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = IteratingStates.SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
self.end_play = False
self.cur_task = 0
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self.set_state_for_host(host.name, HostState(blocks=[]))
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
display.deprecated(
'PlayIterator.cache_block_tasks is now noop due to the changes '
'in the way tasks are cached and is deprecated.',
version=2.16
)
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == IteratingStates.COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host)
if not peek:
self.set_state_for_host(host.name, s)
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = IteratingStates.COMPLETE
return (state, None)
if state.run_state == IteratingStates.SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through IteratingStates.SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through IteratingStates.SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to IteratingStates.TASKS
state.pending_setup = False
state.run_state = IteratingStates.TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == IteratingStates.TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = IteratingStates.RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = IteratingStates.ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = IteratingStates.TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == IteratingStates.RESCUE:
# The process here is identical to IteratingStates.TASKS, except instead
# we move into the always portion of the block.
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
state.run_state = IteratingStates.ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = FailedStates.NONE
state.run_state = IteratingStates.ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == IteratingStates.ALWAYS:
# And again, the process here is identical to IteratingStates.TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to IteratingStates.COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != FailedStates.NONE:
state.run_state = IteratingStates.COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = IteratingStates.TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_always_task += 1
elif state.run_state == IteratingStates.HANDLERS:
if state.update_handlers:
# reset handlers for HostState since handlers from include_tasks
# might be there from previous flush
state.handlers = self.handlers[:]
state.update_handlers = False
state.cur_handlers_task = 0
if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
state.update_handlers = True
state.run_state = IteratingStates.COMPLETE
else:
while True:
try:
task = state.handlers[state.cur_handlers_task]
except IndexError:
task = None
state.run_state = state.pre_flushing_run_state
state.update_handlers = True
break
else:
state.cur_handlers_task += 1
if task.is_host_notified(host):
break
elif state.run_state == IteratingStates.COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == IteratingStates.SETUP:
state.fail_state |= FailedStates.SETUP
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= FailedStates.TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= FailedStates.RESCUE
if state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= FailedStates.ALWAYS
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.HANDLERS:
state.fail_state |= FailedStates.HANDLERS
state.update_handlers = True
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self.set_state_for_host(host.name, s)
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
return True
elif state.fail_state != FailedStates.NONE:
if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
return False
elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0)
elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def clear_host_errors(self, host):
self._clear_state_errors(self.get_state_for_host(host.name))
def _clear_state_errors(self, state: HostState) -> None:
state.fail_state = FailedStates.NONE
if state.tasks_child_state is not None:
self._clear_state_errors(state.tasks_child_state)
elif state.rescue_child_state is not None:
self._clear_state_errors(state.rescue_child_state)
elif state.always_child_state is not None:
self._clear_state_errors(state.always_child_state)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
'''
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
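# Recurse only through nested task blocks; reaching a RESCUE run_state at
# any depth means a rescue section is currently executing.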
if state.run_state == IteratingStates.RESCUE:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
return False
def get_original_task(self, host, task):
display.deprecated(
'PlayIterator.get_original_task is now noop due to the changes '
'in the way tasks are cached and is deprecated.',
version=2.16
)
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if (state.fail_state != FailedStates.NONE and state.run_state == IteratingStates.TASKS) or not task_list:
return state
if state.run_state == IteratingStates.TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.HANDLERS:
state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block]
return state
def add_tasks(self, host, task_list):
self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list))
@property
def host_states(self):
return self._host_states
def get_state_for_host(self, hostname: str) -> HostState:
return self._host_states[hostname]
def set_state_for_host(self, hostname: str, state: HostState) -> None:
if not isinstance(state, HostState):
raise AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state))
self._host_states[hostname] = state
def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None:
if not isinstance(run_state, IteratingStates):
raise AnsibleAssertionError('Expected run_state to be an IteratingStates but was %s' % (type(run_state)))
self._host_states[hostname].run_state = run_state
def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None:
if not isinstance(fail_state, FailedStates):
raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state)))
self._host_states[hostname].fail_state = fail_state
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you call a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task`, but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine when the inner block has no `rescue` or `always` clause: the error propagates to the outer block, where a `rescue` clause can catch it with the expected information set in `ansible_failed_result`.
The problem arises when the inner block uses `rescue` or `always` (in any combination). The outer `rescue` clause is still called, meaning Ansible detected and handled an error, but the context information is gone and `ansible_failed_result` is undefined. You can re-raise the error by passing `ansible_failed_result` as the sole value of the `fail` module's `msg` argument:
```yaml
- block:
    ...
  rescue:
    - name: re-raise the error from the inner block
      fail:
        msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know whether it is "accidental" behavior or the fully supported way to do this. I would like to know whether Ansible core officially supports it and, if this is the correct way to "catch" and "re-raise" errors, to have it documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
lib/ansible/plugins/strategy/__init__.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import queue
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.play_iterator import IteratingStates
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.executor.task_queue_manager import CallbackSend, DisplaySend
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.fqcn import add_internal_fqcns
from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# Entries in this list match a fact name exactly or as a start-of-string
# prefix; regular expressions are not accepted.
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
class StrategySentinel:
pass
_sentinel = StrategySentinel()
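# cleanup() puts this sentinel on the final queue to tell the results thread
# (results_thread_main below) to shut down.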
def post_process_whens(result, task, templar, task_vars):
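# Evaluate the task's changed_when/failed_when conditionals against the task
# result, mutating ``result`` in place.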
cond = None
if task.changed_when:
with templar.set_temporary_context(available_variables=task_vars):
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
with templar.set_temporary_context(available_variables=task_vars):
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
def _get_item_vars(result, task):
item_vars = {}
if task.loop or task.loop_with:
loop_var = result.get('ansible_loop_var', 'item')
index_var = result.get('ansible_index_var')
if loop_var in result:
item_vars[loop_var] = result[loop_var]
if index_var and index_var in result:
item_vars[index_var] = result[index_var]
if '_ansible_item_label' in result:
item_vars['_ansible_item_label'] = result['_ansible_item_label']
if 'ansible_loop' in result:
item_vars['ansible_loop'] = result['ansible_loop']
return item_vars
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, DisplaySend):
display.display(*result.args, **result.kwargs)
elif isinstance(result, CallbackSend):
for arg in result.args:
if isinstance(arg, TaskResult):
strategy.normalize_task_result(arg)
break
strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
strategy.normalize_task_result(result)
with strategy._results_lock:
strategy._results.append(result)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
except queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator.host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
for host_name, state in prev_host_states.items():
if host_name == host.name:
continue
iterator.set_state_for_host(host_name, state)
iterator._play._removed_hosts.remove(host_name)
iterator.set_state_for_host(host.name, prev_host_state)
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if not play.finalized and Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in self._active_connections.values():
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be IteratingStates.COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(self._tqm._unreachable_hosts.keys()) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(iterator.get_failed_hosts()) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by two
# functions: linear.py::run(), and
# free.py::run() so we'd have to add to both to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
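# Round-robin over the worker slots, reusing the first slot whose worker
# is finished (or was never started); if we wrap around without finding
# one, sleep briefly and scan again.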
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
def normalize_task_result(self, task_result):
"""Normalize a TaskResult to reference actual Host and Task objects
when only given the ``Host.name``, or the ``Task._uuid``
Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
Mutates the original object
"""
if isinstance(task_result._host, string_types):
# If the value is a string, it is ``Host.name``
task_result._host = self._inventory.get_host(to_text(task_result._host))
if isinstance(task_result._task, string_types):
# If the value is a string, it is ``Task._uuid``
queue_cache_entry = (task_result._host.name, task_result._task)
try:
found_task = self._queued_task_cache[queue_cache_entry]['task']
except KeyError:
# This should only happen due to an implicit task created by the
# TaskExecutor, restrict this behavior to the explicit use case
# of an implicit async_status task
if task_result._task_fields.get('action') != 'async_status':
raise
original_task = Task()
else:
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._task = original_task
return task_result
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
handler_templar = Templar(self._loader)
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
for handler_task in handler_block.block:
if handler_task.name:
try:
if not handler_task.cached_name:
if handler_templar.is_template(handler_task.name):
handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
task=handler_task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
handler_task.name = handler_templar.template(handler_task.name)
handler_task.cached_name = True
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
candidates = (
handler_task.name,
handler_task.get_name(include_role_fqcn=False),
handler_task.get_name(include_role_fqcn=True),
)
if handler_name in candidates:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable) as e:
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
if not handler_task.listen:
display.warning(
"Handler '%s' is unusable because it has no listen topics and "
"the name could not be templated (host-specific variables are "
"not supported in handler names). The error: %s" % (handler_task.name, to_text(e))
)
continue
cur_pass = 0
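        # drain the shared results deque under the lock; popleft() raises IndexError once it is empty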
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
original_host = task_result._host
original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
iterator.mark_host_failed(h)
else:
iterator.mark_host_failed(original_host)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# Use of get_active_state() here helps detect proper state if, say, we are in a rescue
# block from an included file (include_tasks). In a non-included rescue case, a rescue
# that starts with a new 'block' will have an active state of IteratingStates.TASKS, so we also
# check the current state block tree to see if any blocks are rescuing.
if state and (iterator.get_active_state(state).run_state == IteratingStates.RESCUE or
iterator.is_any_block_rescuing(state)):
self._tqm._stats.increment('rescued', original_host.name)
iterator._play._removed_hosts.remove(original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
for listening_handler_block in iterator._play.handlers:
for listening_handler in listening_handler_block.block:
listeners = getattr(listening_handler, 'listen', []) or []
if not listeners:
continue
listeners = listening_handler.get_validated_value(
'listen', listening_handler.fattributes.get('listen'), listeners, handler_templar
)
if handler_name not in listeners:
continue
else:
found = True
if listening_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._inventory.add_dynamic_host(new_host_info, result_item)
# ensure host is available for subsequent plays
if result_item.get('changed') and new_host_info['host_name'] not in self._hosts_cache_all:
self._hosts_cache_all.append(new_host_info['host_name'])
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._inventory.add_dynamic_group(original_host, result_item)
if 'add_host' in result_item or 'add_group' in result_item:
item_vars = _get_item_vars(result_item, original_task)
found_task_vars = self._queued_task_cache.get((original_host.name, task_result._task._uuid))['task_vars']
if item_vars:
all_task_vars = combine_vars(found_task_vars, item_vars)
else:
all_task_vars = found_task_vars
all_task_vars[original_task.register] = wrap_var(result_item)
post_process_whens(result_item, original_task, handler_templar, all_task_vars)
if original_task.loop or original_task.loop_with:
new_item_result = TaskResult(
task_result._host,
task_result._task,
result_item,
task_result._task_fields,
)
self._tqm.send_callback('v2_runner_item_on_ok', new_item_result)
if result_item.get('changed', False):
task_result._result['changed'] = True
if result_item.get('failed', False):
task_result._result['failed'] = True
if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in result_item['ansible_facts'].items():
# find the host we're actually referring too here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[original_task._role.get_name()].items():
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if isinstance(original_task, Handler):
for handler in (h for b in iterator._play.handlers for h in b.block if h._uuid == original_task._uuid):
handler.remove_host(original_host)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars | included_file._vars
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
Raises AnsibleError exception in case of a failure during including a file,
in such case the caller is responsible for marking the host(s) as failed
using PlayIterator.mark_host_failed().
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleParserError:
raise
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
for r in included_file._results:
r._result['failed'] = True
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
raise AnsibleError(reason) from e
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
skip_reason = '%s conditional evaluated to False' % meta_action
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
# These don't support "when" conditionals
if meta_action in ('noop', 'refresh_inventory', 'reset_connection') and task.when:
self._cond_not_supported_warn(meta_action)
if meta_action == 'noop':
msg = "noop"
elif meta_action == 'flush_handlers':
if _evaluate_conditional(target_host):
host_state = iterator.get_state_for_host(target_host.name)
if host_state.run_state == IteratingStates.HANDLERS:
raise AnsibleError('flush_handlers cannot be used as a handler')
if target_host.name not in self._tqm._unreachable_hosts:
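                    # remember the pre-flush run state so iteration can resume there once handlers finish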
host_state.pre_flushing_run_state = host_state.run_state
host_state.run_state = IteratingStates.HANDLERS
msg = "triggered running handlers for %s" % target_host.name
else:
skipped = True
skip_reason += ', not running handlers for %s' % target_host.name
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator.clear_host_errors(host)
msg = "cleared host errors"
else:
skipped = True
skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_batch':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
msg = "ending batch"
else:
skipped = True
skip_reason += ', continuing current batch'
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
# end_play is used in PlaybookExecutor/TQM to indicate that
# the whole play is supposed to be ended as opposed to just a batch
iterator.end_play = True
msg = "ending play"
else:
skipped = True
skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator.set_run_state_for_host(target_host.name, IteratingStates.COMPLETE)
iterator._play._removed_hosts.append(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
skip_reason += ", continuing execution for %s" % target_host.name
# TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'role_complete':
# Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
# How would this work with allow_duplicates??
if task.implicit:
if target_host.name in task._role._had_task_run:
task._role._completed[target_host.name] = True
msg = 'role_complete for %s' % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist. This 'mostly' works here because meta
# disregards the loop, but should not really use play_context at all
play_context.update_vars(all_vars)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
result['skip_reason'] = skip_reason
else:
result['changed'] = False
display.vv("META: %s" % msg)
if isinstance(task, Handler):
task.remove_host(target_host)
res = TaskResult(target_host, task, result)
if skipped:
self._tqm.send_callback('v2_runner_on_skipped', res)
return [res]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
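    # evaluate() and execute() run user input with self.scope as the local namespace,
    # echoing any exception to the display before re-raising it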
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you use a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task` - but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine if the inner block has no `rescue` or `always` clause - the error from the inner block is propagated to the outer block, where it can be caught in a `rescue` clause with the expected information set in `ansible_failed_result`.
The problem arises when you use `always` in the inner block. The outer `rescue` clause is still called, which means an error was detected and handled by Ansible, but the context information is gone and `ansible_failed_result` is undefined. The same happens if a `rescue` is used in the inner block, with or without the `always`. You can re-raise the error with the `ansible_failed_result` by using it as the only value for a `fail` module `msg` argument:
```yaml
- block:
...
rescue:
- name: re-raise the error from the inner block
fail:
msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know if it is "accidental" behavior or if this is the fully supported way to do this. I would like to see if this is officially supported by Ansible core and, if this is the correct way to "catch" and "re-raise" errors, it should be documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
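A minimal self-contained playbook sketching the pattern (the failing command and task names here are illustrative, not taken from the original report):
```yaml
- hosts: localhost
  gather_facts: false
  tasks:
    - block:
        - block:
            - name: inner task that fails
              command: /bin/false
          rescue:
            - name: re-raise the error from the inner block
              fail:
                msg: "{{ ansible_failed_result }}"
      rescue:
        - name: outer rescue still sees the re-raised failure
          debug:
            var: ansible_failed_result
```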
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
test/integration/targets/blocks/43191-2.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you use a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task` - but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine if the inner block has no `rescue` or `always` clause - the error from the inner block is propagated to the outer block, where it can be caught in a `rescue` clause with the expected information set in `ansible_failed_result`.
The problem arises when you use `always` in the inner block. The outer `rescue` clause is still called, which means an error was detected and handled by Ansible, but the context information is gone and `ansible_failed_result` is undefined. The same happens if a `rescue` is used in the inner block, with or without the `always`. You can re-raise the error with the `ansible_failed_result` by using it as the only value for a `fail` module `msg` argument:
```yaml
- block:
...
rescue:
- name: re-raise the error from the inner block
fail:
msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know if it is "accidental" behavior or if this is the fully supported way to do this. I would like to see if this is officially supported by Ansible core and, if this is the correct way to "catch" and "re-raise" errors, it should be documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
test/integration/targets/blocks/43191.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,638 |
How to rescue and re-raise errors with context information
|
##### SUMMARY
There are cases where you use a role from inside a block, the role itself uses a block, and you want errors from the inner block to propagate to a `rescue` in an outer block, with access to `ansible_failed_result` (and `ansible_failed_task` - but see https://github.com/ansible/ansible/issues/57399) from the inner block. This works fine if the inner block has no `rescue` or `always` clause - the error from the inner block is propagated to the outer block, where it can be caught in a `rescue` clause with the expected information set in `ansible_failed_result`.
The problem arises when you use `always` in the inner block. The outer `rescue` clause is still called, which means an error was detected and handled by Ansible, but the context information is gone and `ansible_failed_result` is undefined. The same happens if a `rescue` is used in the inner block, with or without the `always`. You can re-raise the error with the `ansible_failed_result` by using it as the only value for a `fail` module `msg` argument:
```yaml
- block:
...
rescue:
- name: re-raise the error from the inner block
fail:
msg: "{{ ansible_failed_result }}"
```
This works in Ansible 2.9 and 2.10, but I don't know if it is "accidental" behavior or if this is the fully supported way to do this. I would like to see if this is officially supported by Ansible core and, if this is the correct way to "catch" and "re-raise" errors, it should be documented in the block/rescue/always documentation.
See also https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html#
##### ANSIBLE VERSION
```
ansible 2.9.14
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/rmeggins/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Sep 25 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
```
##### CONFIGURATION
```
no output?
```
##### OS / ENVIRONMENT
Fedora 32 - ansible 2.9 and 2.10
##### ADDITIONAL INFORMATION
https://richm.github.io/how-to-catch-and-reraise-errors-in-ansible
|
https://github.com/ansible/ansible/issues/72638
|
https://github.com/ansible/ansible/pull/78676
|
848143640ba88f34e6e952faba4e1b5fd1c1b2dd
|
fd19ff231055c439c6a2e9bb590fef09818b2afc
| 2020-11-16T19:03:36Z |
python
| 2022-09-06T15:11:49Z |
test/integration/targets/blocks/runme.sh
|
#!/usr/bin/env bash
set -eux
# This test does not use "$@" to avoid further increasing the verbosity beyond what is required for the test.
# Increasing verbosity from -vv to -vvv can increase the line count from ~400 to ~9K on our centos6 test container.
# remove old output log
rm -f block_test.out
# run the test and check to make sure the right number of completions was logged
ansible-playbook -vv main.yml -i ../../inventory | tee block_test.out
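# strip ANSI color escape sequences so the 'plays in' line can be matched with plain grep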
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
rm -f block_test.out block_test_wo_colors.out
# run test with free strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=free | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
rm -f block_test.out block_test_wo_colors.out
# run test with host_pinned strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=host_pinned | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# run test that includes tasks that fail inside a block with always
rm -f block_test.out block_test_wo_colors.out
ansible-playbook -vv block_fail.yml -i ../../inventory | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
ansible-playbook -vv block_rescue_vars.yml
# https://github.com/ansible/ansible/issues/70000
set +e
exit_code=0
ansible-playbook -vv always_failure_with_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Rescue' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
set +e
exit_code=0
ansible-playbook -vv always_no_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
set +e
exit_code=0
ansible-playbook -vv always_failure_no_rescue_rc.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 2 ]
[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/29047
ansible-playbook -vv issue29047.yml -i ../../inventory
# https://github.com/ansible/ansible/issues/61253
ansible-playbook -vv block_in_rescue.yml -i ../../inventory > rc_test.out
cat rc_test.out
[ "$(grep -c 'rescued=3' rc_test.out)" -eq 1 ]
[ "$(grep -c 'failed=0' rc_test.out)" -eq 1 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/71306
set +e
exit_code=0
ansible-playbook -i host1,host2 -vv issue71306.yml > rc_test.out || exit_code=$?
set -e
cat rc_test.out
[ $exit_code -eq 0 ]
rm -f rc_test.out
# https://github.com/ansible/ansible/issues/69848
ansible-playbook -i host1,host2 --tags foo -vv 69848.yml > role_complete_test.out
cat role_complete_test.out
[ "$(grep -c 'Tagged task' role_complete_test.out)" -eq 2 ]
[ "$(grep -c 'Not tagged task' role_complete_test.out)" -eq 0 ]
rm -f role_complete_test.out
# test notify inheritance
ansible-playbook inherit_notify.yml "$@"
ansible-playbook unsafe_failed_task.yml "$@"
ansible-playbook finalized_task.yml "$@"
# https://github.com/ansible/ansible/issues/72725
ansible-playbook -i host1,host2 -vv 72725.yml
# https://github.com/ansible/ansible/issues/72781
set +e
ansible-playbook -i host1,host2 -vv 72781.yml > 72781.out
set -e
cat 72781.out
[ "$(grep -c 'SHOULD NOT HAPPEN' 72781.out)" -eq 0 ]
rm -f 72781.out
set +e
ansible-playbook -i host1,host2 -vv 78612.yml | tee 78612.out
set -e
[ "$(grep -c 'PASSED' 78612.out)" -eq 1 ]
rm -f 78612.out
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,537 |
Wrong OS family fact reported for Flatcar
|
### Summary
Ansible may report the wrong `ansible_os_family` fact on Flatcar Container Linux. The correct value is `Flatcar`; however, some users see `Flatcar Container Linux by Kinvolk` for the same fact under certain circumstances, which leads to wrong playbook behavior because Ansible fails to identify Flatcar as the running distro.
In https://github.com/ansible/ansible/pull/69627 we contributed Ansible core logic which affects the value of `ansible_os_family` for Flatcar. Back then we specified `/etc/flatcar/update.conf` as the file from which to determine the name of the distro (likely because that's how CoreOS - Flatcar's direct ancestor - did it). **This decision no longer makes sense to us** given that this file isn't the authoritative place for the distro name. Moreover, `/etc/flatcar/update.conf` may be missing entirely, depending on user-provided bootstrap configuration.
### Issue Type
Bug Report
### Component Name
Ansible core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/home/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/me/.local/lib/python3.8/site-packages/ansible
ansible collection location = /home/me/.ansible/collections:/usr/share/ansible/collections
executable location = /home/me/.local/bin/ansible
python version = 3.8.10 (default, Nov 26 2021, 20:14:08) [GCC 9.3.0]
jinja version = 3.1.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
Flatcar Container Linux 3139.2.0 for example
### Steps to Reproduce
Follow the official [instructions](https://www.flatcar.org/docs/latest/installing/vms/qemu/) to run Flatcar locally on QEMU:
```
mkdir flatcar; cd flatcar
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh.sig
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2.sig
bzip2 -d flatcar_production_qemu_image.img.bz2
chmod +x flatcar_production_qemu.sh
ssh-keygen -f key -q -N ""
./flatcar_production_qemu.sh -a ./key.pub -- -nographic
```
On another shell, SSH into the Flatcar VM:
```
cd flatcar
ssh -p 2222 -i key core@localhost
```
Install PyPy (Flatcar doesn't ship with a Python interpreter):
```
cd /opt
wget -O - https://downloads.python.org/pypy/pypy3.8-v7.3.9-linux64.tar.bz2 | sudo tar xjf -
sudo ln -s /opt/pypy3.8-v7.3.9-linux64/bin/pypy /opt/bin/python
```
Run Ansible against the VM and print the OS family:
```
cat <<EOF >playbook.yaml
- hosts: all
user: core
tasks:
- name: Print distro
debug:
var: ansible_os_family
EOF
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************************************************************************************************************************************************************************************************
TASK [Gathering Facts] *****************************************************************************************************************************
ok: [localhost]
TASK [Print distro] ********************************************************************************************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar Container Linux by Kinvolk"
}
PLAY RECAP *****************************************************************************************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
SSH into the VM and populate `/etc/flatcar/update.conf` with dummy values:
```
printf "SERVER=foo\nGROUP=bar\n" | sudo tee /etc/flatcar/update.conf
```
Run the playbook again:
```
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************************************************************************************************************************************************************************************************
TASK [Gathering Facts] *****************************************************************************************************************************
ok: [localhost]
TASK [Print distro] ********************************************************************************************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar"
}
PLAY RECAP *****************************************************************************************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Expected Results
I expected to always see `Flatcar` as `ansible_os_family` on Flatcar Container Linux.
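A minimal regression check (a sketch assuming the same inventory and interpreter flags as in the reproduction above):
```yaml
- hosts: all
  user: core
  tasks:
    - name: verify the OS family fact is stable
      assert:
        that:
          - ansible_os_family == 'Flatcar'
```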
### Actual Results
```console
The value of `ansible_os_family` changes based on the existence of `/etc/flatcar/update.conf`.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77537
|
https://github.com/ansible/ansible/pull/77635
|
77ba025a1301c62dd945fd0f18153c5eef9a0b77
|
fbd828673de3d4eed525a982d75ace39a1f9eef1
| 2022-04-14T13:04:38Z |
python
| 2022-09-07T17:14:12Z |
hacking/tests/gen_distribution_version_testcase.py
|
#!/usr/bin/env python
"""
This script generates test cases for test_distribution_version.py.
To do so, it outputs the relevant files from /etc/*release, the output of distro.linux_distribution()
and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os.path
import platform
import subprocess
import sys
from ansible.module_utils import distro
from ansible.module_utils._text import to_text
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/centos-release',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
'/etc/os-release',
'/etc/lsb-release',
'/etc/altlinux-release',
'/etc/os-release',
'/etc/coreos/update.conf',
'/etc/flatcar/update.conf',
'/usr/lib/os-release',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
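# the (id, version, codename) triple stands in for the removed platform.dist(); it is emitted as 'platform.dist' below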
dist = (distro.id(), distro.version(), distro.codename())
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
b_ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
ansible_out = to_text(b_ansible_out)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
except Exception:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'distro': {
'codename': distro.codename(),
'id': distro.id(),
'name': distro.name(),
'version': distro.version(),
'version_best': distro.version(best=True),
'lsb_release_info': distro.lsb_release_info(),
'os_release_info': distro.os_release_info(),
},
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
system = platform.system()
if system != 'Linux':
output['platform.system'] = system
release = platform.release()
if release:
output['platform.release'] = release
print(json.dumps(output, indent=4))
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,537 |
Wrong OS family fact reported for Flatcar
|
### Summary
Ansible may report the wrong `ansible_os_family` fact on Flatcar Container Linux. The correct value is `Flatcar`; however, some users see `Flatcar Container Linux by Kinvolk` for the same fact under certain circumstances, which leads to wrong playbook behavior because Ansible fails to identify Flatcar as the running distro.
In https://github.com/ansible/ansible/pull/69627 we contributed Ansible core logic which affects the value of `ansible_os_family` for Flatcar. Back then we specified `/etc/flatcar/update.conf` as the file from which to determine the name of the distro (likely because that's how CoreOS - Flatcar's direct ancestor - did it). **This decision no longer makes sense to us** given that this file isn't the authoritative place for the distro name. Moreover, `/etc/flatcar/update.conf` may be missing entirely, depending on user-provided bootstrap configuration.
### Issue Type
Bug Report
### Component Name
Ansible core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/home/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/me/.local/lib/python3.8/site-packages/ansible
ansible collection location = /home/me/.ansible/collections:/usr/share/ansible/collections
executable location = /home/me/.local/bin/ansible
python version = 3.8.10 (default, Nov 26 2021, 20:14:08) [GCC 9.3.0]
jinja version = 3.1.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
Flatcar Container Linux 3139.2.0 for example
### Steps to Reproduce
Follow the official [instructions](https://www.flatcar.org/docs/latest/installing/vms/qemu/) to run Flatcar locally on QEMU:
```
mkdir flatcar; cd flatcar
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh.sig
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2.sig
bzip2 -d flatcar_production_qemu_image.img.bz2
chmod +x flatcar_production_qemu.sh
ssh-keygen -f key -q -N ""
./flatcar_production_qemu.sh -a ./key.pub -- -nographic
```
On another shell, SSH into the Flatcar VM:
```
cd flatcar
ssh -p 2222 -i key core@localhost
```
Install PyPy (Flatcar doesn't ship with a Python interpreter):
```
cd /opt
wget -O - https://downloads.python.org/pypy/pypy3.8-v7.3.9-linux64.tar.bz2 | sudo tar xjf -
sudo ln -s /opt/pypy3.8-v7.3.9-linux64/bin/pypy /opt/bin/python
```
Run Ansible against the VM and print the OS family:
```
cat <<EOF >playbook.yaml
- hosts: all
user: core
tasks:
- name: Print distro
debug:
var: ansible_os_family
EOF
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************************************************************************************************************************************************************************************************
TASK [Gathering Facts] *****************************************************************************************************************************
ok: [localhost]
TASK [Print distro] ********************************************************************************************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar Container Linux by Kinvolk"
}
PLAY RECAP *****************************************************************************************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
SSH into the VM and populate `/etc/flatcar/update.conf` with dummy values:
```
printf "SERVER=foo\nGROUP=bar\n" | sudo tee /etc/flatcar/update.conf
```
Run the playbook again:
```
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************************************************************************************************************************************************************************************************
TASK [Gathering Facts] *****************************************************************************************************************************
ok: [localhost]
TASK [Print distro] ********************************************************************************************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar"
}
PLAY RECAP *****************************************************************************************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Expected Results
I expected to always see `Flatcar` as `ansible_os_family` on Flatcar Container Linux.
### Actual Results
```console
The value of `ansible_os_family` changes based on the existence of `/etc/flatcar/update.conf`.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77537
|
https://github.com/ansible/ansible/pull/77635
|
77ba025a1301c62dd945fd0f18153c5eef9a0b77
|
fbd828673de3d4eed525a982d75ace39a1f9eef1
| 2022-04-14T13:04:38Z |
python
| 2022-09-07T17:14:12Z |
lib/ansible/module_utils/facts/system/distribution.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
import ansible.module_utils.compat.typing as t
from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
get_distribution_codename
from ansible.module_utils.facts.utils import get_file_content, get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
def get_uname(module, flags=('-v')):
if isinstance(flags, str):
flags = flags.split()
command = ['uname']
command.extend(flags)
rc, out, err = module.run_command(command)
if rc == 0:
return out
return None
def _file_exists(path, allow_empty=False):
# not finding the file, exit early
if not os.path.exists(path):
return False
# if just the path needs to exists (ie, it can be empty) we are done
if allow_empty:
return True
    # file exists but is empty and we don't allow_empty
if os.path.getsize(path) == 0:
return False
# file exists with some content
return True
class DistributionFiles:
'''has-a various distro file parsers (os-release, etc) and logic for finding the right one.'''
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
# keep names in sync with Conditionals page of docs
OSDIST_LIST = (
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/centos-release', 'name': 'CentOS'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/os-release', 'name': 'Amazon'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'Archlinux'},
{'path': '/etc/os-release', 'name': 'SUSE'},
{'path': '/etc/SuSE-release', 'name': 'SUSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
{'path': '/etc/os-release', 'name': 'NA'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT',
'SMGL': 'Source Mage GNU/Linux',
}
# We can't include this in SEARCH_STRING because a name match on its keys
# causes a fallback to using the first whitespace separated item from the file content
    # as the name. For os-release, that is in the form 'NAME=Arch'
OS_RELEASE_ALIAS = {
'Archlinux': 'Arch Linux'
}
STRIP_QUOTES = r'\'\"\\'
def __init__(self, module):
self.module = module
def _get_file_content(self, path):
return get_file_content(path)
def _get_dist_file_content(self, path, allow_empty=False):
        # can't find that dist file or it is incorrectly empty
if not _file_exists(path, allow_empty=allow_empty):
return False, None
data = self._get_file_content(path)
return True, data
def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
dist_file_dict = {}
dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
if name in self.SEARCH_STRING:
            # look for the distribution search string in the data
# only the distribution name is set, the version is assumed to be correct from distro.linux_distribution()
if self.SEARCH_STRING[name] in dist_file_content:
# this sets distribution=RedHat if 'Red Hat' shows up in data
dist_file_dict['distribution'] = name
dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
dist_file_dict['distribution'] = dist_file_content.split()[0]
return True, dist_file_dict
if name in self.OS_RELEASE_ALIAS:
if self.OS_RELEASE_ALIAS[name] in dist_file_content:
dist_file_dict['distribution'] = name
return True, dist_file_dict
return False, dist_file_dict
# call a dedicated function for parsing the file content
# TODO: replace with a map or a class
try:
            # FIXME: most of these don't actually look at the dist file contents, but random other stuff
distfunc_name = 'parse_distribution_file_' + name
distfunc = getattr(self, distfunc_name)
parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
return parsed, dist_file_dict
except AttributeError as exc:
self.module.debug('exc: %s' % exc)
# this should never happen, but if it does fail quietly and not with a traceback
return False, dist_file_dict
return True, dist_file_dict
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
def _guess_distribution(self):
# try to find out which linux distribution this is
dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
distribution_guess = {
'distribution': dist[0] or 'NA',
'distribution_version': dist[1] or 'NA',
# distribution_release can be the empty string
'distribution_release': 'NA' if dist[2] is None else dist[2]
}
distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
return distribution_guess
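    # Illustrative sketch only: on a host where the distro library reports
    # ('Flatcar', '2492.0.0', 'Rhyolite'), the guess above would be
    # {'distribution': 'Flatcar', 'distribution_version': '2492.0.0',
    #  'distribution_release': 'Rhyolite', 'distribution_major_version': '2492'}
    # (illustrative values matching the Flatcar 2492.0.0 fixture).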
def process_dist_files(self):
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
dist_file_facts = {}
dist_guess = self._guess_distribution()
dist_file_facts.update(dist_guess)
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
allow_empty = ddict.get('allowempty', False)
has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)
            # the dist file exists but may be empty, and we allow_empty here; in that
            # case just use the name. For example, ArchLinux has an empty
            # /etc/arch-release alongside an /etc/os-release with a different name
if has_dist_file and allow_empty:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
dist_file_facts['distribution_file_variety'] = name
break
if not has_dist_file:
# keep looking
continue
parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)
# finally found the right os dist file and were able to parse it
if parsed_dist_file:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
# distribution and file_variety are the same here, but distribution
# will be changed/mapped to a more specific name.
# ie, dist=Fedora, file_variety=RedHat
dist_file_facts['distribution_file_variety'] = name
dist_file_facts['distribution_file_parsed'] = parsed_dist_file
dist_file_facts.update(parsed_dist_file_facts)
break
return dist_file_facts
# TODO: FIXME: split distro file parsing into its own module or class
def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
slackware_facts = {}
if 'Slackware' not in data:
return False, slackware_facts # TODO: remove
slackware_facts['distribution'] = name
version = re.findall(r'\w+[.]\w+\+?', data)
if version:
slackware_facts['distribution_version'] = version[0]
return True, slackware_facts
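    # Sketch: for an /etc/slackware-version containing e.g. 'Slackware 14.2',
    # the regex above yields distribution_version == '14.2'; the optional '+'
    # also accepts '14.2+' style version strings.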
def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
amazon_facts = {}
if 'Amazon' not in data:
return False, amazon_facts
amazon_facts['distribution'] = 'Amazon'
if path == '/etc/os-release':
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
distribution_version = version.group(1)
amazon_facts['distribution_version'] = distribution_version
version_data = distribution_version.split(".")
if len(version_data) > 1:
major, minor = version_data
else:
major, minor = version_data[0], 'NA'
amazon_facts['distribution_major_version'] = major
amazon_facts['distribution_minor_version'] = minor
else:
version = [n for n in data.split() if n.isdigit()]
version = version[0] if version else 'NA'
amazon_facts['distribution_version'] = version
return True, amazon_facts
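    # Sketch (hypothetical file contents): an /etc/os-release with
    # VERSION_ID="2017.09" yields version '2017.09', major '2017' and minor
    # '09', while VERSION_ID="2" yields major '2' and minor 'NA'. For
    # /etc/system-release, the first whitespace-separated token made up
    # entirely of digits becomes the version, or 'NA' if there is none.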
def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
openwrt_facts = {}
if 'OpenWrt' not in data:
return False, openwrt_facts # TODO: remove
openwrt_facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
openwrt_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
openwrt_facts['distribution_release'] = release.groups()[0]
return True, openwrt_facts
def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
alpine_facts = {}
alpine_facts['distribution'] = 'Alpine'
alpine_facts['distribution_version'] = data
return True, alpine_facts
def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
suse_facts = {}
if 'suse' not in data.lower():
return False, suse_facts # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
suse_facts['distribution'] = distribution.group(1).strip('"')
                # example patterns are 13.04, 13.0, 13
distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
suse_facts['distribution_version'] = distribution_version.group(1)
suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
if 'open' in data.lower():
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release:
suse_facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
suse_facts['distribution_release'] = release
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).splitlines()[0]
suse_facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
suse_facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
suse_facts['distribution'] = "SLES"
elif "Desktop" in data:
suse_facts['distribution'] = "SLED"
for line in lines:
                release = re.search('PATCHLEVEL = ([0-9]+)', line)  # SLES doesn't have funny release names
if release:
suse_facts['distribution_release'] = release.group(1)
suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
# See https://www.suse.com/support/kb/doc/?id=000019341 for SLES for SAP
if os.path.islink('/etc/products.d/baseproduct') and os.path.realpath('/etc/products.d/baseproduct').endswith('SLES_SAP.prod'):
suse_facts['distribution'] = 'SLES_SAP'
return True, suse_facts
def parse_distribution_file_Debian(self, name, data, path, collected_facts):
debian_facts = {}
if 'Debian' in data or 'Raspbian' in data:
debian_facts['distribution'] = 'Debian'
release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
# Last resort: try to find release from tzdata as either lsb is missing or this is very old debian
if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
dpkg_cmd = self.module.get_bin_path('dpkg')
if dpkg_cmd:
cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
rc, out, err = self.module.run_command(cmd)
if rc == 0:
debian_facts['distribution_release'] = out.strip()
debian_version_path = '/etc/debian_version'
distdata = get_file_lines(debian_version_path)
for line in distdata:
m = re.search(r'(\d+)\.(\d+)', line.strip())
if m:
debian_facts['distribution_minor_version'] = m.groups()[1]
elif 'Ubuntu' in data:
debian_facts['distribution'] = 'Ubuntu'
# nothing else to do, Ubuntu gets correct info from python functions
elif 'SteamOS' in data:
debian_facts['distribution'] = 'SteamOS'
# nothing else to do, SteamOS gets correct info from python functions
elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
if 'Kali' in data:
# Kali does not provide /etc/lsb-release anymore
debian_facts['distribution'] = 'Kali'
elif 'Parrot' in data:
debian_facts['distribution'] = 'Parrot'
release = re.search('DISTRIB_RELEASE=(.*)', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif 'Devuan' in data:
debian_facts['distribution'] = 'Devuan'
release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1)
elif 'Cumulus' in data:
debian_facts['distribution'] = 'Cumulus Linux'
version = re.search(r"VERSION_ID=(.*)", data)
if version:
major, _minor, _dummy_ver = version.group(1).split(".")
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = major
release = re.search(r'VERSION="(.*)"', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif "Mint" in data:
debian_facts['distribution'] = 'Linux Mint'
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
elif 'UOS' in data or 'Uos' in data or 'uos' in data:
debian_facts['distribution'] = 'Uos'
release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
elif 'Deepin' in data or 'deepin' in data:
debian_facts['distribution'] = 'Deepin'
release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
else:
return False, debian_facts
return True, debian_facts
def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
mandriva_facts = {}
if 'Mandriva' in data:
mandriva_facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
mandriva_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
mandriva_facts['distribution_release'] = release.groups()[0]
mandriva_facts['distribution'] = name
else:
return False, mandriva_facts
return True, mandriva_facts
def parse_distribution_file_NA(self, name, data, path, collected_facts):
na_facts = {}
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and name == 'NA':
na_facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and collected_facts['distribution_version'] == 'NA':
na_facts['distribution_version'] = version.group(1).strip('"')
return True, na_facts
def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
coreos_facts = {}
# FIXME: pass in ro copy of facts for this kind of thing
distro = get_distribution()
if distro.lower() == 'coreos':
if not data:
# include fix from #15230, #15228
# TODO: verify this is ok for above bugs
return False, coreos_facts
release = re.search("^GROUP=(.*)", data)
if release:
coreos_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, coreos_facts # TODO: remove if tested without this
return True, coreos_facts
def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
flatcar_facts = {}
distro = get_distribution()
if distro.lower() == 'flatcar':
if not data:
return False, flatcar_facts
release = re.search("^GROUP=(.*)", data)
if release:
flatcar_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, flatcar_facts
return True, flatcar_facts
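    # Illustrative sketch: when the distro library already reports 'flatcar'
    # and /etc/flatcar/update.conf starts with e.g. GROUP=stable, this returns
    # (True, {'distribution_release': 'stable'}). Note the '^GROUP' search is
    # not multiline, so GROUP only matches at the start of the file. An empty
    # file yields (False, {}); a missing file never reaches this parser, and
    # process_dist_files() falls through to the os-release entries instead,
    # which is the behaviour the issue above is concerned with.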
def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
clear_facts = {}
if "clearlinux" not in name.lower():
return False, clear_facts
pname = re.search('NAME="(.*)"', data)
if pname:
if 'Clear Linux' not in pname.groups()[0]:
return False, clear_facts
clear_facts['distribution'] = pname.groups()[0]
version = re.search('VERSION_ID=(.*)', data)
if version:
clear_facts['distribution_major_version'] = version.groups()[0]
clear_facts['distribution_version'] = version.groups()[0]
release = re.search('ID=(.*)', data)
if release:
clear_facts['distribution_release'] = release.groups()[0]
return True, clear_facts
def parse_distribution_file_CentOS(self, name, data, path, collected_facts):
centos_facts = {}
if 'CentOS Stream' in data:
centos_facts['distribution_release'] = 'Stream'
return True, centos_facts
if "TencentOS Server" in data:
centos_facts['distribution'] = 'TencentOS'
return True, centos_facts
return False, centos_facts
class Distribution(object):
"""
This subclass of Facts fills the distribution, distribution_version and distribution_release variables
To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# keep keys in sync with Conditionals page of docs
OS_FAMILY_MAP = {'RedHat': ['RedHat', 'RHEL', 'Fedora', 'CentOS', 'Scientific', 'SLC',
'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba',
'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky', 'TencentOS',
'EuroLinux', 'Kylin Linux Advanced Server'],
'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
'Mandrake': ['Mandrake', 'Mandriva'],
'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
'Slackware': ['Slackware'],
'Altlinux': ['Altlinux'],
'SGML': ['SGML'],
'Gentoo': ['Gentoo', 'Funtoo'],
'Alpine': ['Alpine'],
'AIX': ['AIX'],
'HP-UX': ['HPUX'],
'Darwin': ['MacOSX'],
'FreeBSD': ['FreeBSD', 'TrueOS'],
'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'],
'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD'],
'NetBSD': ['NetBSD'], }
OS_FAMILY = {}
for family, names in OS_FAMILY_MAP.items():
for name in names:
OS_FAMILY[name] = family
def __init__(self, module):
self.module = module
def get_distribution_facts(self):
distribution_facts = {}
# The platform module provides information about the running
# system/distribution. Use this as a baseline and fix buggy systems
# afterwards
system = platform.system()
distribution_facts['distribution'] = system
distribution_facts['distribution_release'] = platform.release()
distribution_facts['distribution_version'] = platform.version()
systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
if system in systems_implemented:
cleanedname = system.replace('-', '')
distfunc = getattr(self, 'get_distribution_' + cleanedname)
dist_func_facts = distfunc()
distribution_facts.update(dist_func_facts)
elif system == 'Linux':
distribution_files = DistributionFiles(module=self.module)
# linux_distribution_facts = LinuxDistribution(module).get_distribution_facts()
dist_file_facts = distribution_files.process_dist_files()
distribution_facts.update(dist_file_facts)
distro = distribution_facts['distribution']
        # look for an os family alias for the 'distribution'; if there isn't one, use 'distribution'
distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro
return distribution_facts
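    # For example, OS_FAMILY.get('CentOS') returns 'RedHat', while a name with
    # no mapping (such as 'Flatcar' in this version of the table) falls
    # through and becomes its own os_family; this is why the exact
    # distribution string matters for the fact discussed in this issue.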
def get_distribution_AIX(self):
aix_facts = {}
rc, out, err = self.module.run_command("/usr/bin/oslevel")
data = out.split('.')
aix_facts['distribution_major_version'] = data[0]
if len(data) > 1:
aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1])
aix_facts['distribution_release'] = data[1]
else:
aix_facts['distribution_version'] = data[0]
return aix_facts
def get_distribution_HPUX(self):
hpux_facts = {}
rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
hpux_facts['distribution_version'] = data.groups()[0]
hpux_facts['distribution_release'] = data.groups()[1]
return hpux_facts
def get_distribution_Darwin(self):
darwin_facts = {}
darwin_facts['distribution'] = 'MacOSX'
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
if data:
darwin_facts['distribution_major_version'] = data.split('.')[0]
darwin_facts['distribution_version'] = data
return darwin_facts
def get_distribution_FreeBSD(self):
freebsd_facts = {}
freebsd_facts['distribution_release'] = platform.release()
data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT|RC|PRERELEASE).*', freebsd_facts['distribution_release'])
if 'trueos' in platform.version():
freebsd_facts['distribution'] = 'TrueOS'
if data:
freebsd_facts['distribution_major_version'] = data.group(1)
freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
return freebsd_facts
def get_distribution_OpenBSD(self):
openbsd_facts = {}
openbsd_facts['distribution_version'] = platform.release()
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
openbsd_facts['distribution_release'] = match.groups()[0]
else:
openbsd_facts['distribution_release'] = 'release'
return openbsd_facts
def get_distribution_DragonFly(self):
dragonfly_facts = {
'distribution_release': platform.release()
}
rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out)
if match:
dragonfly_facts['distribution_major_version'] = match.group(1)
dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3]
return dragonfly_facts
def get_distribution_NetBSD(self):
netbsd_facts = {}
platform_release = platform.release()
netbsd_facts['distribution_release'] = platform_release
rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match(r'NetBSD\s(\d+)\.(\d+)\s\((GENERIC)\).*', out)
if match:
netbsd_facts['distribution_major_version'] = match.group(1)
netbsd_facts['distribution_version'] = '%s.%s' % match.groups()[:2]
else:
netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
netbsd_facts['distribution_version'] = platform_release
return netbsd_facts
def get_distribution_SMGL(self):
smgl_facts = {}
smgl_facts['distribution'] = 'Source Mage GNU/Linux'
return smgl_facts
def get_distribution_SunOS(self):
sunos_facts = {}
data = get_file_content('/etc/release').splitlines()[0]
if 'Solaris' in data:
# for solaris 10 uname_r will contain 5.10, for solaris 11 it will have 5.11
uname_r = get_uname(self.module, flags=['-r'])
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ', '')
ora_prefix = 'Oracle '
sunos_facts['distribution'] = data.split()[0]
sunos_facts['distribution_version'] = data.split()[1]
sunos_facts['distribution_release'] = ora_prefix + data
sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip()
return sunos_facts
uname_v = get_uname(self.module, flags=['-v'])
distribution_version = None
if 'SmartOS' in data:
sunos_facts['distribution'] = 'SmartOS'
if _file_exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
sunos_facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
sunos_facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_v is not None and 'NexentaOS_' in uname_v:
sunos_facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
sunos_facts['distribution_release'] = data.strip()
if distribution_version is not None:
sunos_facts['distribution_version'] = distribution_version
elif uname_v is not None:
sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip()
return sunos_facts
return sunos_facts
class DistributionFactCollector(BaseFactCollector):
name = 'distribution'
_fact_ids = set(['distribution_version',
'distribution_release',
'distribution_major_version',
'os_family']) # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
facts_dict = {}
if not module:
return facts_dict
distribution = Distribution(module=module)
distro_facts = distribution.get_distribution_facts()
return distro_facts
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,537 |
Wrong OS family fact reported for Flatcar
|
### Summary
Ansible may report the wrong `ansible_os_family` fact on Flatcar Container Linux. The correct value is `Flatcar`; however, some users see `Flatcar Container Linux by Kinvolk` for the same fact under certain circumstances, which leads to wrong playbook behavior because Ansible fails to identify Flatcar as the running distro.
In https://github.com/ansible/ansible/pull/69627 we contributed Ansible core logic that affects the value of `ansible_os_family` for Flatcar. Back then we specified `/etc/flatcar/update.conf` as the file from which to determine the name of the distro (likely because that's how CoreOS, Flatcar's direct ancestor, did the same). **This decision no longer makes sense to us** given that this file isn't the authoritative place for the distro name. Moreover, `/etc/flatcar/update.conf` may be missing entirely, depending on user-provided bootstrap configuration.
### Issue Type
Bug Report
### Component Name
Ansible core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/home/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/me/.local/lib/python3.8/site-packages/ansible
ansible collection location = /home/me/.ansible/collections:/usr/share/ansible/collections
executable location = /home/me/.local/bin/ansible
python version = 3.8.10 (default, Nov 26 2021, 20:14:08) [GCC 9.3.0]
jinja version = 3.1.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
Flatcar Container Linux 3139.2.0 for example
### Steps to Reproduce
Follow the official [instructions](https://www.flatcar.org/docs/latest/installing/vms/qemu/) to run Flatcar locally on QEMU:
```
mkdir flatcar; cd flatcar
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh.sig
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2.sig
bzip2 -d flatcar_production_qemu_image.img.bz2
chmod +x flatcar_production_qemu.sh
ssh-keygen -f key -q -N ""
./flatcar_production_qemu.sh -a ./key.pub -- -nographic
```
On another shell, SSH into the Flatcar VM:
```
cd flatcar
ssh -p 2222 -i key core@localhost
```
Install PyPy (Flatcar doesn't ship with a Python interpreter):
```
cd /opt
wget -O - https://downloads.python.org/pypy/pypy3.8-v7.3.9-linux64.tar.bz2 | sudo tar xjf -
sudo ln -s /opt/pypy3.8-v7.3.9-linux64/bin/pypy /opt/bin/python
```
Run Ansible against the VM and print the OS family:
```
cat <<EOF >playbook.yaml
- hosts: all
user: core
tasks:
- name: Print distro
debug:
var: ansible_os_family
EOF
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************

TASK [Gathering Facts] *****************************************************
ok: [localhost]

TASK [Print distro] ********************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar Container Linux by Kinvolk"
}

PLAY RECAP *****************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
SSH into the VM and populate `/etc/flatcar/update.conf` with dummy values:
```
printf "SERVER=foo\nGROUP=bar\n" | sudo tee /etc/flatcar/update.conf
```
Run the playbook again:
```
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************

TASK [Gathering Facts] *****************************************************
ok: [localhost]

TASK [Print distro] ********************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar"
}

PLAY RECAP *****************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Expected Results
I expected to always see `Flatcar` as `ansible_os_family` on Flatcar Container Linux.
### Actual Results
```console
The value of `ansible_os_family` changes based on the existence of `/etc/flatcar/update.conf`.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77537
|
https://github.com/ansible/ansible/pull/77635
|
77ba025a1301c62dd945fd0f18153c5eef9a0b77
|
fbd828673de3d4eed525a982d75ace39a1f9eef1
| 2022-04-14T13:04:38Z |
python
| 2022-09-07T17:14:12Z |
test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json
|
{
"name": "Flatcar Container Linux",
"input": {
"/usr/lib/os-release": "NAME=\"Flatcar Container Linux by Kinvolk\"\nID=flatcar\nID_LIKE=coreos\nVERSION=2492.0.0\nVERSION_ID=2492.0.0\nBUILD_ID=2020-04-28-2210\nPRETTY_NAME=\"Flatcar Container Linux by Kinvolk 2492.0.0 (Rhyolite)\"\nANSI_COLOR=\"38;5;75\"\nHOME_URL=\"https://flatcar-linux.org/\"\nBUG_REPORT_URL=\"https://issues.flatcar-linux.org\"",
"/etc/lsb-release": "DISTRIB_ID=\"Flatcar Container Linux by Kinvolk\"\nDISTRIB_RELEASE=2492.0.0\nDISTRIB_CODENAME=\"Rhyolite\"\nDISTRIB_DESCRIPTION=\"Flatcar Container Linux by Kinvolk 2492.0.0 (Rhyolite)\""
},
"platform.dist": ["", "", ""],
"distro": {
"codename": "Rhyolite",
"id": "flatcar",
"id_like": "coreos",
"name": "Flatcar",
"version": "2492.0.0",
"version_best": "2492.0.0",
"os_release_info": {},
"lsb_release_info": {}
},
"platform.release": "",
"result": {
"distribution": "Flatcar",
"distribution_major_version": "2492",
"distribution_version": "2492.0.0"
}
}
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,537 |
Wrong OS family fact reported for Flatcar
|
### Summary
Ansible may report the wrong `ansible_os_family` fact on Flatcar Container Linux. The correct value is `Flatcar`; however, some users see `Flatcar Container Linux by Kinvolk` for the same fact under certain circumstances, which leads to wrong playbook behavior because Ansible fails to identify Flatcar as the running distro.
In https://github.com/ansible/ansible/pull/69627 we contributed Ansible core logic that affects the value of `ansible_os_family` for Flatcar. Back then we specified `/etc/flatcar/update.conf` as the file from which to determine the name of the distro (likely because that's how CoreOS, Flatcar's direct ancestor, did the same). **This decision no longer makes sense to us** given that this file isn't the authoritative place for the distro name. Moreover, `/etc/flatcar/update.conf` may be missing entirely, depending on user-provided bootstrap configuration.
### Issue Type
Bug Report
### Component Name
Ansible core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/home/me/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/me/.local/lib/python3.8/site-packages/ansible
ansible collection location = /home/me/.ansible/collections:/usr/share/ansible/collections
executable location = /home/me/.local/bin/ansible
python version = 3.8.10 (default, Nov 26 2021, 20:14:08) [GCC 9.3.0]
jinja version = 3.1.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
Flatcar Container Linux 3139.2.0 for example
### Steps to Reproduce
Follow the official [instructions](https://www.flatcar.org/docs/latest/installing/vms/qemu/) to run Flatcar locally on QEMU:
```
mkdir flatcar; cd flatcar
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu.sh.sig
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2
wget https://stable.release.flatcar-linux.net/amd64-usr/3139.2.0/flatcar_production_qemu_image.img.bz2.sig
bzip2 -d flatcar_production_qemu_image.img.bz2
chmod +x flatcar_production_qemu.sh
ssh-keygen -f key -q -N ""
./flatcar_production_qemu.sh -a ./key.pub -- -nographic
```
On another shell, SSH into the Flatcar VM:
```
cd flatcar
ssh -p 2222 -i key core@localhost
```
Install PyPy (Flatcar doesn't ship with a Python interpreter):
```
cd /opt
wget -O - https://downloads.python.org/pypy/pypy3.8-v7.3.9-linux64.tar.bz2 | sudo tar xjf -
sudo ln -s /opt/pypy3.8-v7.3.9-linux64/bin/pypy /opt/bin/python
```
Run Ansible against the VM and print the OS family:
```
cat <<EOF >playbook.yaml
- hosts: all
user: core
tasks:
- name: Print distro
debug:
var: ansible_os_family
EOF
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************

TASK [Gathering Facts] *****************************************************
ok: [localhost]

TASK [Print distro] ********************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar Container Linux by Kinvolk"
}

PLAY RECAP *****************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
SSH into the VM and populate `/etc/flatcar/update.conf` with dummy values:
```
printf "SERVER=foo\nGROUP=bar\n" | sudo tee /etc/flatcar/update.conf
```
Run the playbook again:
```
ansible-playbook playbook.yaml -i localhost:2222, --key-file ./key -e ansible_python_interpreter=/opt/bin/python -e ansible_port=2222
```
Output:
```
PLAY [all] *****************************************************************

TASK [Gathering Facts] *****************************************************
ok: [localhost]

TASK [Print distro] ********************************************************
ok: [localhost] => {
    "ansible_os_family": "Flatcar"
}

PLAY RECAP *****************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Expected Results
I expected to always see `Flatcar` as `ansible_os_family` on Flatcar Container Linux.
### Actual Results
```console
The value of `ansible_os_family` changes based on the existence of `/etc/flatcar/update.conf`.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77537
|
https://github.com/ansible/ansible/pull/77635
|
77ba025a1301c62dd945fd0f18153c5eef9a0b77
|
fbd828673de3d4eed525a982d75ace39a1f9eef1
| 2022-04-14T13:04:38Z |
python
| 2022-09-07T17:14:12Z |
test/units/module_utils/facts/system/distribution/fixtures/flatcar_3139.2.0.json
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,315 |
Empty "META: " lines printed
|
### Summary
My complex `site.yml` causes "empty" `META: ` lines to be printed, often together with other `META: ` lines such as `ran handlers` and `role_complete`.
The line is:
```
META:
```
There is a space after the `META:`, but no reason follows it, such as `ran handlers` or `role_complete`.
Normal/valid `META: ` lines with a "reason" often, but not always, have these "empty" `META: ` lines near them.
I cannot reproduce this issue with a simple playbook or a simple playbook plus a role. My `site.yml` uses multiple layers of `include_role` and so on, so it is difficult to simplify it into a test case. But perhaps it is possible to look at how that "empty" `META: ` line could be produced.
### Issue Type
Bug Report
### Component Name
ansible
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.9]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /y/lib64/python3.6/site-packages/ansible
ansible collection location = /x/.ansible/collections:/usr/share/ansible/collections
executable location = /y/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CACHE_PLUGIN(/etc/ansible/ansible.cfg) = community.general.yaml
CACHE_PLUGIN_CONNECTION(/etc/ansible/ansible.cfg) = /tmp/.ansible-fact.$USER
CACHE_PLUGIN_TIMEOUT(/etc/ansible/ansible.cfg) = 5184000
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_LOCAL_TMP(/etc/ansible/ansible.cfg) = /tmp/.ansible.x/ansible-local-20276c69v2u7m
DEFAULT_LOG_PATH(/etc/ansible/ansible.cfg) = /var/log/ansible.log
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 50
DEFAULT_VAULT_IDENTITY_LIST(env: ANSIBLE_VAULT_IDENTITY_LIST) = ['a@v1', 'b@v2']
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
RETRY_FILES_SAVE_PATH(/etc/ansible/ansible.cfg) = /tmp/.ansible-retry.x
```
### OS / Environment
EL7
### Steps to Reproduce
```yaml
could not reproduce with a naive test case; it is visible in my site.yml, which contains a total of ~500 .yml files across dozens of roles
```
### Expected Results
There should be no spurious `META: ` lines that carry no further information about what they refer to.
### Actual Results
```console
example 1:
2022-03-17 09:34:47,350 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,359 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,368 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,377 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,386 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,396 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,405 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,415 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,425 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,435 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,443 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,452 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,461 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,470 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,479 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,597 p=13119 u=user n=ansible | ok: [host.example.com]
2022-03-17 09:34:47,612 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,621 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,631 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,640 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,649 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,659 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,669 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,679 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,689 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,698 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,706 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,715 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,724 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,733 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,741 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,751 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:48,056 p=13119 u=user n=ansible | META:
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77315
|
https://github.com/ansible/ansible/pull/78681
|
a26c325bd8f6e2822d9d7e62f77a424c1db4fbf6
|
a6d4c3ff7cf43c24be6622102cee834fc5096496
| 2022-03-18T07:29:16Z |
python
| 2022-09-13T07:50:10Z |
changelogs/fragments/77315-fix-meta-vv-header.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77,315 |
Empty "META: " lines printed
|
### Summary
My complex `site.yml` causes "empty" `META: ` lines to be printed, often together with other `META: ` lines such as `ran handlers` and `role_complete`.
The line is:
```
META:
```
There is a space after the `META:`, but no reason follows it, such as `ran handlers` or `role_complete`.
Normal/valid `META: ` lines with a "reason" often, but not always, have these "empty" `META: ` lines near them.
I cannot reproduce this issue with a simple playbook or a simple playbook plus a role. My `site.yml` uses multiple layers of `include_role` and so on, so it is difficult to simplify it into a test case. But perhaps it is possible to look at how that "empty" `META: ` line could be produced.
### Issue Type
Bug Report
### Component Name
ansible
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.9]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /y/lib64/python3.6/site-packages/ansible
ansible collection location = /x/.ansible/collections:/usr/share/ansible/collections
executable location = /y/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CACHE_PLUGIN(/etc/ansible/ansible.cfg) = community.general.yaml
CACHE_PLUGIN_CONNECTION(/etc/ansible/ansible.cfg) = /tmp/.ansible-fact.$USER
CACHE_PLUGIN_TIMEOUT(/etc/ansible/ansible.cfg) = 5184000
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_LOCAL_TMP(/etc/ansible/ansible.cfg) = /tmp/.ansible.x/ansible-local-20276c69v2u7m
DEFAULT_LOG_PATH(/etc/ansible/ansible.cfg) = /var/log/ansible.log
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 50
DEFAULT_VAULT_IDENTITY_LIST(env: ANSIBLE_VAULT_IDENTITY_LIST) = ['a@v1', 'b@v2']
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
RETRY_FILES_SAVE_PATH(/etc/ansible/ansible.cfg) = /tmp/.ansible-retry.x
```
### OS / Environment
EL7
### Steps to Reproduce
```yaml
could not reproduce with a naive test case; it is visible in my site.yml, which contains a total of ~500 .yml files across dozens of roles
```
### Expected Results
There should be no spurious `META: ` lines that carry no further information about what they refer to.
### Actual Results
```console
example 1:
2022-03-17 09:34:47,350 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,359 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,368 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,377 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,386 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,396 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,405 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,415 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,425 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,435 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,443 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,452 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,461 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,470 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,479 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,597 p=13119 u=user n=ansible | ok: [host.example.com]
2022-03-17 09:34:47,612 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,621 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,631 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,640 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,649 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,659 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,669 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,679 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,689 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,698 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,706 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,715 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,724 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,733 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,741 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,751 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:48,056 p=13119 u=user n=ansible | META:
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77315
|
https://github.com/ansible/ansible/pull/78681
|
a26c325bd8f6e2822d9d7e62f77a424c1db4fbf6
|
a6d4c3ff7cf43c24be6622102cee834fc5096496
| 2022-03-18T07:29:16Z |
python
| 2022-09-13T07:50:10Z |
lib/ansible/plugins/strategy/__init__.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import queue
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.play_iterator import IteratingStates
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.executor.task_queue_manager import CallbackSend, DisplaySend
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.fqcn import add_internal_fqcns
from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# Entries in this list match either exactly or as a start-of-string prefix;
# regular expressions are not accepted.
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
class StrategySentinel:
pass
_sentinel = StrategySentinel()
def post_process_whens(result, task, templar, task_vars):
cond = None
if task.changed_when:
with templar.set_temporary_context(available_variables=task_vars):
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
with templar.set_temporary_context(available_variables=task_vars):
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
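# Usage sketch (hypothetical condition): for a task declaring
#   changed_when: my_result.rc != 2
# the code above re-evaluates result['changed'] against the templated
# conditional using the task's vars; failed_when is handled the same way and
# sets both 'failed_when_result' and 'failed'.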
def _get_item_vars(result, task):
item_vars = {}
if task.loop or task.loop_with:
loop_var = result.get('ansible_loop_var', 'item')
index_var = result.get('ansible_index_var')
if loop_var in result:
item_vars[loop_var] = result[loop_var]
if index_var and index_var in result:
item_vars[index_var] = result[index_var]
if '_ansible_item_label' in result:
item_vars['_ansible_item_label'] = result['_ansible_item_label']
if 'ansible_loop' in result:
item_vars['ansible_loop'] = result['ansible_loop']
return item_vars
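# Minimal sketch of the mapping above, for a task with a loop and a
# hypothetical result dict:
#   result = {'item': 'eth0', 'ansible_loop_var': 'item',
#             'ansible_index_var': 'my_idx', 'my_idx': 0}
#   yields item_vars == {'item': 'eth0', 'my_idx': 0}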
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, DisplaySend):
display.display(*result.args, **result.kwargs)
elif isinstance(result, CallbackSend):
for arg in result.args:
if isinstance(arg, TaskResult):
strategy.normalize_task_result(arg)
break
strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
strategy.normalize_task_result(result)
with strategy._results_lock:
strategy._results.append(result)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
except queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator.host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
for host_name, state in prev_host_states.items():
if host_name == host.name:
continue
iterator.set_state_for_host(host_name, state)
iterator._play._removed_hosts.remove(host_name)
iterator.set_state_for_host(host.name, prev_host_state)
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if not play.finalized and Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in self._active_connections.values():
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be IteratingStates.COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(self._tqm._unreachable_hosts.keys()) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(iterator.get_failed_hosts()) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
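        # Return hosts in the play that have neither failed nor gone unreachable.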
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by two
# functions: linear.py::run(), and
# free.py::run() so we'd have to add to both to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
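        # A run_once result applies to every host in the play that is still
        # reachable; otherwise it applies only to the host that ran the task.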
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
def get_delegated_hosts(self, result, task):
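        # Prefer the delegated host name recorded by the executor in the result;
        # fall back to the task's raw delegate_to value.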
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
def normalize_task_result(self, task_result):
"""Normalize a TaskResult to reference actual Host and Task objects
when only given the ``Host.name``, or the ``Task._uuid``
Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
Mutates the original object
"""
if isinstance(task_result._host, string_types):
# If the value is a string, it is ``Host.name``
task_result._host = self._inventory.get_host(to_text(task_result._host))
if isinstance(task_result._task, string_types):
# If the value is a string, it is ``Task._uuid``
queue_cache_entry = (task_result._host.name, task_result._task)
try:
found_task = self._queued_task_cache[queue_cache_entry]['task']
except KeyError:
# This should only happen due to an implicit task created by the
# TaskExecutor, restrict this behavior to the explicit use case
# of an implicit async_status task
if task_result._task_fields.get('action') != 'async_status':
raise
original_task = Task()
else:
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._task = original_task
return task_result
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
handler_templar = Templar(self._loader)
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
for handler_task in handler_block.block:
if handler_task.name:
try:
if not handler_task.cached_name:
if handler_templar.is_template(handler_task.name):
handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
task=handler_task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
handler_task.name = handler_templar.template(handler_task.name)
handler_task.cached_name = True
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
candidates = (
handler_task.name,
handler_task.get_name(include_role_fqcn=False),
handler_task.get_name(include_role_fqcn=True),
)
if handler_name in candidates:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable) as e:
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
if not handler_task.listen:
display.warning(
"Handler '%s' is unusable because it has no listen topics and "
"the name could not be templated (host-specific variables are "
"not supported in handler names). The error: %s" % (handler_task.name, to_text(e))
)
continue
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
original_host = task_result._host
original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
iterator.mark_host_failed(h)
else:
iterator.mark_host_failed(original_host)
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# if we're iterating on the rescue portion of a block then
# we save the failed task in a special var for use
# within the rescue/always
if iterator.is_any_block_rescuing(state):
self._tqm._stats.increment('rescued', original_host.name)
iterator._play._removed_hosts.remove(original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
self._tqm._stats.increment('dark', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
for listening_handler_block in iterator._play.handlers:
for listening_handler in listening_handler_block.block:
listeners = getattr(listening_handler, 'listen', []) or []
if not listeners:
continue
listeners = listening_handler.get_validated_value(
'listen', listening_handler.fattributes.get('listen'), listeners, handler_templar
)
if handler_name not in listeners:
continue
else:
found = True
if listening_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._inventory.add_dynamic_host(new_host_info, result_item)
# ensure host is available for subsequent plays
if result_item.get('changed') and new_host_info['host_name'] not in self._hosts_cache_all:
self._hosts_cache_all.append(new_host_info['host_name'])
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._inventory.add_dynamic_group(original_host, result_item)
if 'add_host' in result_item or 'add_group' in result_item:
item_vars = _get_item_vars(result_item, original_task)
found_task_vars = self._queued_task_cache.get((original_host.name, task_result._task._uuid))['task_vars']
if item_vars:
all_task_vars = combine_vars(found_task_vars, item_vars)
else:
all_task_vars = found_task_vars
all_task_vars[original_task.register] = wrap_var(result_item)
post_process_whens(result_item, original_task, handler_templar, all_task_vars)
if original_task.loop or original_task.loop_with:
new_item_result = TaskResult(
task_result._host,
task_result._task,
result_item,
task_result._task_fields,
)
self._tqm.send_callback('v2_runner_item_on_ok', new_item_result)
if result_item.get('changed', False):
task_result._result['changed'] = True
if result_item.get('failed', False):
task_result._result['failed'] = True
if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in result_item['ansible_facts'].items():
# find the host we're actually referring too here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[original_task._role.get_name()].items():
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if isinstance(original_task, Handler):
for handler in (h for b in iterator._play.handlers for h in b.block if h._uuid == original_task._uuid):
handler.remove_host(original_host)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
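        # Dict union: on key conflicts the included file's vars (the right
        # operand) override the vars already set on the task copy.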
temp_vars = ti_copy.vars | included_file._vars
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
Raises AnsibleError exception in case of a failure during including a file,
in such case the caller is responsible for marking the host(s) as failed
using PlayIterator.mark_host_failed().
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleParserError:
raise
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
for r in included_file._results:
r._result['failed'] = True
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
raise AnsibleError(reason) from e
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def _take_step(self, task, host=None):
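        # Interactive --step prompt: (N)o skips the task, (y)es runs it, and
        # (c)ontinue runs it while disabling step mode for the rest of the run.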
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
skip_reason = '%s conditional evaluated to False' % meta_action
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
# These don't support "when" conditionals
if meta_action in ('noop', 'refresh_inventory', 'reset_connection') and task.when:
self._cond_not_supported_warn(meta_action)
if meta_action == 'noop':
msg = "noop"
elif meta_action == 'flush_handlers':
if _evaluate_conditional(target_host):
host_state = iterator.get_state_for_host(target_host.name)
if host_state.run_state == IteratingStates.HANDLERS:
raise AnsibleError('flush_handlers cannot be used as a handler')
if target_host.name not in self._tqm._unreachable_hosts:
host_state.pre_flushing_run_state = host_state.run_state
host_state.run_state = IteratingStates.HANDLERS
msg = "triggered running handlers for %s" % target_host.name
else:
skipped = True
skip_reason += ', not running handlers for %s' % target_host.name
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator.clear_host_errors(host)
msg = "cleared host errors"
else:
skipped = True
skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_batch':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
msg = "ending batch"
else:
skipped = True
skip_reason += ', continuing current batch'
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
# end_play is used in PlaybookExecutor/TQM to indicate that
# the whole play is supposed to be ended as opposed to just a batch
iterator.end_play = True
msg = "ending play"
else:
skipped = True
skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator.set_run_state_for_host(target_host.name, IteratingStates.COMPLETE)
iterator._play._removed_hosts.append(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
skip_reason += ", continuing execution for %s" % target_host.name
# TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'role_complete':
# Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
# How would this work with allow_duplicates??
if task.implicit:
if target_host.name in task._role._had_task_run:
task._role._completed[target_host.name] = True
msg = 'role_complete for %s' % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist. This 'mostly' works here cause meta
# disregards the loop, but should not really use play_context at all
play_context.update_vars(all_vars)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
result['skip_reason'] = skip_reason
else:
result['changed'] = False
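        # Note: when none of the branches above set msg (e.g. an implicit
        # role_complete whose conditions are not met), this prints a bare
        # "META: " line in verbose output -- the behavior reported in
        # ansible/ansible#77315 below. A minimal guard (a sketch, not
        # necessarily what the linked PR does) would be:
        #     if msg: display.vv("META: %s" % msg)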
display.vv("META: %s" % msg)
if isinstance(task, Handler):
task.remove_host(target_host)
res = TaskResult(target_host, task, result)
if skipped:
self._tqm.send_callback('v2_runner_on_skipped', res)
return [res]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
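    # Interactive task debugger entered from debug_closure; the NextAction it
    # records tells the strategy whether to redo the task, continue, or exit.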
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 77315 |
Empty "META: " lines printed
|
### Summary
My complex `site.yml` causes "empty" `META: ` lines to be printed, often together with other `META: ` lines such as `ran handlers` and `role_complete`.
The line is:
```
META:
```
There is a space after the `META:`, but no reason such as `ran handlers` or `role_complete` follows it.
Normal/valid `META: ` lines with a "reason" often, but not always, have these "empty" `META: ` lines near them.
I cannot reproduce this issue with a simple playbook or a simple playbook plus a role. My `site.yml` uses multiple layers of `include_role` and so on, so it is difficult to reduce it to a test case. But perhaps it is possible to look at how that "empty" `META: ` line could be produced.
### Issue Type
Bug Report
### Component Name
ansible
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.9]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /y/lib64/python3.6/site-packages/ansible
ansible collection location = /x/.ansible/collections:/usr/share/ansible/collections
executable location = /y/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CACHE_PLUGIN(/etc/ansible/ansible.cfg) = community.general.yaml
CACHE_PLUGIN_CONNECTION(/etc/ansible/ansible.cfg) = /tmp/.ansible-fact.$USER
CACHE_PLUGIN_TIMEOUT(/etc/ansible/ansible.cfg) = 5184000
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_LOCAL_TMP(/etc/ansible/ansible.cfg) = /tmp/.ansible.x/ansible-local-20276c69v2u7m
DEFAULT_LOG_PATH(/etc/ansible/ansible.cfg) = /var/log/ansible.log
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 50
DEFAULT_VAULT_IDENTITY_LIST(env: ANSIBLE_VAULT_IDENTITY_LIST) = ['a@v1', 'b@v2']
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
RETRY_FILES_SAVE_PATH(/etc/ansible/ansible.cfg) = /tmp/.ansible-retry.x
```
### OS / Environment
EL7
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
could not reproduce with a naive test case; it is visible in my site.yml, which contains ~500 .yml files in total across dozens of roles
```
### Expected Results
There should be no spurious `META: ` lines that carry no information about what they refer to.
### Actual Results
```console
example 1:
2022-03-17 09:34:47,350 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,359 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,368 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,377 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,386 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,396 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,405 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,415 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,425 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,435 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,443 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,452 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,461 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,470 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,479 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:47,597 p=13119 u=user n=ansible | ok: [host.example.com]
2022-03-17 09:34:47,612 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,621 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,631 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,640 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,649 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,659 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,669 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,679 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,689 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,698 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,706 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,715 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,724 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,733 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,741 p=13119 u=user n=ansible | META:
2022-03-17 09:34:47,751 p=13119 u=user n=ansible | META: ran handlers
2022-03-17 09:34:48,056 p=13119 u=user n=ansible | META:
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/77315
|
https://github.com/ansible/ansible/pull/78681
|
a26c325bd8f6e2822d9d7e62f77a424c1db4fbf6
|
a6d4c3ff7cf43c24be6622102cee834fc5096496
| 2022-03-18T07:29:16Z |
python
| 2022-09-13T07:50:10Z |
test/integration/targets/meta_tasks/runme.sh
|
#!/usr/bin/env bash
set -eux
# test end_host meta task, with when conditional
for test_strategy in linear free; do
out="$(ansible-playbook test_end_host.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -q '"skip_reason": "end_host conditional evaluated to False, continuing execution for testhost"' <<< "$out"
grep -q "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
out="$(ansible-playbook test_end_host_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -q '"skip_reason": "end_host conditional evaluated to False, continuing execution for testhost"' <<< "$out"
grep -q "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
done
# test end_host meta task, on all hosts
for test_strategy in linear free; do
out="$(ansible-playbook test_end_host_all.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -qv "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
out="$(ansible-playbook test_end_host_all_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -qv "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
done
# test end_play meta task
for test_strategy in linear free; do
out="$(ansible-playbook test_end_play.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
out="$(ansible-playbook test_end_play_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
out="$(ansible-playbook test_end_play_serial_one.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
[ "$(grep -c "Testing end_play on host" <<< "$out" )" -eq 1 ]
grep -q "META: ending play" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
out="$(ansible-playbook test_end_play_multiple_plays.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play" <<< "$out"
grep -q "Play 1" <<< "$out"
grep -q "Play 2" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
done
# test end_batch meta task
for test_strategy in linear free; do
out="$(ansible-playbook test_end_batch.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
[ "$(grep -c "Using end_batch" <<< "$out" )" -eq 2 ]
[ "$(grep -c "META: ending batch" <<< "$out" )" -eq 2 ]
grep -qv 'Failed to end_batch' <<< "$out"
done
# test refresh
ansible-playbook -i inventory_refresh.yml refresh.yml "$@"
ansible-playbook -i inventory_refresh.yml refresh_preserve_dynamic.yml "$@"
|